Dataset columns (each row below follows this order):
  code                      string, length 81 to 54k
  code_codestyle            int64, 0 to 721
  style_context             string, length 91 to 41.9k
  style_context_codestyle   int64, 0 to 699
  label                     int64, 0 to 1

----------------------------------------
code:
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() _a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, 
TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def a__ ( a : List[str] , a : Any , a : Optional[Any] , a : Tuple , a : Union[str, Any]=False , a : List[str]=True ): """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' ) _snake_case : Tuple = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: _snake_case : List[str] = cached_file(a , a , force_download=not use_cached_models ) _snake_case : str = config_class.from_json_file(a ) _snake_case : Optional[int] = True _snake_case : Any = True print(f'Building TensorFlow model from configuration: {config}' ) _snake_case : Union[str, Any] = model_class(a ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): _snake_case : List[Any] = cached_file( a , a , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: _snake_case : str = load_pytorch_checkpoint_in_tfa_model(a , a ) if compare_with_pt_model: _snake_case : List[str] = tf_model(tf_model.dummy_inputs , training=a ) # build the network _snake_case : Dict = torch.load(a , map_location="cpu" ) _snake_case : str = pt_model_class.from_pretrained( pretrained_model_name_or_path=a , config=a , state_dict=a ) with torch.no_grad(): _snake_case : Optional[int] = pt_model(**pt_model.dummy_inputs ) _snake_case : Tuple = pto[0].numpy() _snake_case : Optional[int] = tfo[0].numpy() _snake_case : List[Any] = np.amax(np.abs(np_pt - np_tf ) ) print(f'Max absolute difference between models outputs {diff}' ) assert diff <= 2e-2, f'Error, model absolute difference is >2e-2: {diff}' # Save pytorch-model print(f'Save TensorFlow model to {tf_dump_path}' ) tf_model.save_weights(a , save_format="h5" ) def a__ ( a : Dict , a : Optional[Any] , a : str=None , a : Any=None , a : Optional[Any]=False , a : List[str]=False , a : Union[str, Any]=False , a : List[Any]=False , ): """simple docstring""" if args_model_type is None: _snake_case : Union[str, Any] = list(MODEL_CLASSES.keys() ) else: _snake_case : Tuple = [args_model_type] for j, model_type in enumerate(a , start=1 ): print("=" * 100 ) print(f' Converting model type {j}/{len(a )}: {model_type}' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' 
) _snake_case : Union[str, Any] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: _snake_case : Union[str, Any] = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: _snake_case : Dict = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(a , a ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f' Skipping finetuned checkpoint {model_shortcut_name}' ) continue _snake_case : str = model_shortcut_name elif only_convert_finetuned_models: print(f' Skipping not finetuned checkpoint {model_shortcut_name}' ) continue print( f' Converting checkpoint {i}/{len(a )}: {model_shortcut_name} - model_type {model_type}' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: _snake_case : int = cached_file(a , a , force_download=not use_cached_models ) else: _snake_case : Dict = config_shortcut_name if model_shortcut_name in aws_model_maps: _snake_case : List[Any] = cached_file(a , a , force_download=not use_cached_models ) else: _snake_case : Any = model_shortcut_name if os.path.isfile(a ): _snake_case : Optional[Any] = "converted_model" convert_pt_checkpoint_to_tf( model_type=a , pytorch_checkpoint_path=a , config_file=a , tf_dump_path=os.path.join(a , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=a , ) if remove_cached_files: os.remove(a ) os.remove(a ) if __name__ == "__main__": _a : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( f'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ' """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") _a : Dict = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
code_codestyle: 703

style_context:
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _snake_case): __lowercase : int = """EncodecFeatureExtractor""" __lowercase : str = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , snake_case_ , snake_case_ ): super().__init__(snake_case_ , snake_case_ ) _snake_case : Dict = self.feature_extractor _snake_case : Any = False def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=True ): return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ ) def __call__( self , *snake_case_ , **snake_case_ ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*snake_case_ , **snake_case_ ) _snake_case : str = kwargs.pop("audio" , snake_case_ ) _snake_case : Optional[int] = kwargs.pop("sampling_rate" , snake_case_ ) _snake_case : Optional[Any] = kwargs.pop("text" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Any = args[0] _snake_case : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if text is not None: _snake_case : Any = self.tokenizer(snake_case_ , **snake_case_ ) if audio is not None: _snake_case : Any = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ ) if audio is None: return inputs elif text is None: return audio_inputs else: _snake_case : str = audio_inputs["input_values"] if "padding_mask" in audio_inputs: _snake_case : List[str] = audio_inputs["padding_mask"] return inputs def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): _snake_case : Tuple = kwargs.pop("audio" , snake_case_ ) _snake_case : List[str] = kwargs.pop("padding_mask" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Tuple = args[0] _snake_case : Dict = args[1:] if audio_values is not None: return self._decode_audio(snake_case_ , padding_mask=snake_case_ ) else: return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): return self.tokenizer.decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ): _snake_case : Optional[int] = to_numpy(snake_case_ ) _snake_case , _snake_case , _snake_case : Tuple = audio_values.shape if padding_mask is None: return list(snake_case_ ) _snake_case : Optional[int] = to_numpy(snake_case_ ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _snake_case : Any = seq_len - padding_mask.shape[-1] _snake_case : Optional[Any] = 1 - self.feature_extractor.padding_value _snake_case : Optional[int] = np.pad(snake_case_ , ((0, 0), (0, difference)) , "constant" , constant_values=snake_case_ ) _snake_case : Any = audio_values.tolist() for i in range(snake_case_ ): _snake_case : Tuple = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _snake_case : Tuple = sliced_audio.reshape(snake_case_ , -1 ) return audio_values
style_context_codestyle: 87
label: 0

----------------------------------------
code:
import re
from filelock import FileLock

try:
    import nltk

    _a : List[Any] = True
except (ImportError, ModuleNotFoundError):
    _a : Any = False

if NLTK_AVAILABLE:
    with FileLock(""".lock""") as lock:
        nltk.download("""punkt""", quiet=True)

def a__ ( a : str ):
    """simple docstring"""
    re.sub("<n>" , "" , a )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(a ) )
code_codestyle: 704

style_context:
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = ["""YolosFeatureExtractor"""] _a : List[Any] = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Union[str, Any] = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys _a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 87
label: 0

----------------------------------------
code:
"""simple docstring""" from __future__ import annotations _a : Dict = 10 def a__ ( a : list[int] ): """simple docstring""" _snake_case : Any = 1 _snake_case : Optional[int] = max(a ) while placement <= max_digit: # declare and initialize empty buckets _snake_case : list[list] = [[] for _ in range(a )] # split list_of_ints between the buckets for i in list_of_ints: _snake_case : Tuple = int((i / placement) % RADIX ) buckets[tmp].append(a ) # put each buckets' contents into list_of_ints _snake_case : List[str] = 0 for b in range(a ): for i in buckets[b]: _snake_case : List[str] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 705

style_context:
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[int] = dataset _snake_case : str = process _snake_case : int = params def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): _snake_case : Union[str, Any] = self.dataset[i] _snake_case : Optional[Any] = self.process(snake_case_ , **self.params ) return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): _snake_case : Union[str, Any] = loader _snake_case : Tuple = infer _snake_case : List[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _snake_case : int = None _snake_case : int = loader_batch_size # Internal bookkeeping _snake_case : Any = None _snake_case : Dict = None def __len__( self ): return len(self.loader ) def __iter__( self ): _snake_case : int = iter(self.loader ) return self def lowerCamelCase__ ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(snake_case_ , snake_case_ ): # Convert ModelOutput to tuple first _snake_case : Tuple = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _snake_case : Tuple = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_snake_case : List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _snake_case : int = self._loader_batch_data.__class__(snake_case_ ) self._loader_batch_index += 1 return result def lowerCamelCase__ ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _snake_case : Tuple = next(self.iterator ) _snake_case : Any = self.infer(snake_case_ , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Optional[int] = list(processed.keys() )[0] _snake_case : List[str] = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Dict = len(snake_case_ ) else: _snake_case : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _snake_case : Union[str, Any] = observed_batch_size # Setting internal index to unwrap the batch _snake_case : str = processed _snake_case : List[Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): super().__init__(snake_case_ , snake_case_ , snake_case_ ) def __iter__( self ): _snake_case : Tuple = iter(self.loader ) _snake_case : List[Any] = None return self def lowerCamelCase__ ( self ): if self.subiterator is None: _snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _snake_case : Union[str, Any] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _snake_case : str = self.infer(next(self.iterator ) , **self.params ) _snake_case : Tuple = next(self.subiterator ) return processed class _UpperCAmelCase ( _snake_case): def __iter__( self ): _snake_case : Optional[Any] = iter(self.loader ) return self def lowerCamelCase__ ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
_snake_case : Optional[Any] = False _snake_case : Tuple = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : str = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator while not is_last: _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Tuple = list(processed.keys() )[0] _snake_case : Tuple = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Any = len(snake_case_ ) else: _snake_case : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _snake_case : Dict = observed_batch_size _snake_case : List[Any] = processed _snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : int = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator else: _snake_case : Dict = processed _snake_case : Dict = item.pop("is_last" ) accumulator.append(snake_case_ ) return accumulator class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ ): _snake_case : str = dataset _snake_case : Any = key def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return self.dataset[i][self.key] class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = dataset _snake_case : Any = keya _snake_case : int = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
style_context_codestyle: 87
label: 0

----------------------------------------
code:
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : Tuple = AudioLDMPipeline __lowercase : str = TEXT_TO_AUDIO_PARAMS __lowercase : str = TEXT_TO_AUDIO_BATCH_PARAMS __lowercase : Optional[int] = frozenset( [ """num_inference_steps""", """num_waveforms_per_prompt""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ]) def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=snake_case_ , ) _snake_case : List[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , ) torch.manual_seed(0 ) _snake_case : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _snake_case : int = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , ) _snake_case : Tuple = ClapTextModelWithProjection(snake_case_ ) _snake_case : Union[str, Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 ) _snake_case : int = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case_ , ) _snake_case : Tuple = SpeechTaHifiGan(snake_case_ ) _snake_case : Optional[int] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ): if str(snake_case_ ).startswith("mps" ): _snake_case : Optional[Any] = torch.manual_seed(snake_case_ ) else: _snake_case : List[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _snake_case : Union[str, Any] = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Optional[int] = 
self.get_dummy_components() _snake_case : Optional[int] = AudioLDMPipeline(**snake_case_ ) _snake_case : Any = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : List[str] = self.get_dummy_inputs(snake_case_ ) _snake_case : List[Any] = audioldm_pipe(**snake_case_ ) _snake_case : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert len(snake_case_ ) == 2_56 _snake_case : Union[str, Any] = audio[:10] _snake_case : int = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): _snake_case : int = self.get_dummy_components() _snake_case : Any = AudioLDMPipeline(**snake_case_ ) _snake_case : Any = audioldm_pipe.to(snake_case_ ) _snake_case : List[str] = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : Union[str, Any] = self.get_dummy_inputs(snake_case_ ) _snake_case : str = 3 * [inputs["prompt"]] # forward _snake_case : Dict = audioldm_pipe(**snake_case_ ) _snake_case : List[str] = output.audios[0] _snake_case : Optional[int] = self.get_dummy_inputs(snake_case_ ) _snake_case : str = 3 * [inputs.pop("prompt" )] _snake_case : Optional[Any] = audioldm_pipe.tokenizer( snake_case_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , ) _snake_case : Dict = text_inputs["input_ids"].to(snake_case_ ) _snake_case : Dict = audioldm_pipe.text_encoder( snake_case_ , ) _snake_case : str = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state _snake_case : Optional[int] = F.normalize(snake_case_ , dim=-1 ) _snake_case : List[Any] = prompt_embeds # forward _snake_case : Tuple = audioldm_pipe(**snake_case_ ) _snake_case : Optional[int] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def lowerCamelCase__ ( self ): _snake_case : Any = self.get_dummy_components() _snake_case : str = AudioLDMPipeline(**snake_case_ ) _snake_case : str = audioldm_pipe.to(snake_case_ ) _snake_case : List[Any] = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : str = self.get_dummy_inputs(snake_case_ ) _snake_case : Any = 3 * ["this is a negative prompt"] _snake_case : int = negative_prompt _snake_case : Any = 3 * [inputs["prompt"]] # forward _snake_case : int = audioldm_pipe(**snake_case_ ) _snake_case : List[str] = output.audios[0] _snake_case : str = self.get_dummy_inputs(snake_case_ ) _snake_case : str = 3 * [inputs.pop("prompt" )] _snake_case : Optional[int] = [] for p in [prompt, negative_prompt]: _snake_case : str = audioldm_pipe.tokenizer( snake_case_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , ) _snake_case : Tuple = text_inputs["input_ids"].to(snake_case_ ) _snake_case : Union[str, Any] = audioldm_pipe.text_encoder( snake_case_ , ) _snake_case : List[str] = text_embeds.text_embeds # additional L_2 normalization over each hidden-state _snake_case : int = F.normalize(snake_case_ , dim=-1 ) embeds.append(snake_case_ ) _snake_case : List[Any] = embeds # forward _snake_case : str = audioldm_pipe(**snake_case_ ) _snake_case : Tuple = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def lowerCamelCase__ ( self ): _snake_case : str = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : List[Any] 
= self.get_dummy_components() _snake_case : List[Any] = PNDMScheduler(skip_prk_steps=snake_case_ ) _snake_case : int = AudioLDMPipeline(**snake_case_ ) _snake_case : int = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : int = self.get_dummy_inputs(snake_case_ ) _snake_case : Tuple = "egg cracking" _snake_case : Tuple = audioldm_pipe(**snake_case_ , negative_prompt=snake_case_ ) _snake_case : str = output.audios[0] assert audio.ndim == 1 assert len(snake_case_ ) == 2_56 _snake_case : Dict = audio[:10] _snake_case : Dict = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): _snake_case : str = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Any = self.get_dummy_components() _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=snake_case_ ) _snake_case : int = AudioLDMPipeline(**snake_case_ ) _snake_case : Dict = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : List[str] = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) _snake_case : Optional[Any] = audioldm_pipe(snake_case_ , num_inference_steps=2 ).audios assert audios.shape == (1, 2_56) # test num_waveforms_per_prompt=1 (default) for batch of prompts _snake_case : Any = 2 _snake_case : Union[str, Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_56) # test num_waveforms_per_prompt for single prompt _snake_case : Optional[Any] = 2 _snake_case : Optional[int] = audioldm_pipe(snake_case_ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case_ ).audios assert audios.shape == (num_waveforms_per_prompt, 2_56) # test num_waveforms_per_prompt for batch of prompts _snake_case : str = 2 _snake_case : Optional[Any] = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56) def lowerCamelCase__ ( self ): _snake_case : str = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : List[Any] = self.get_dummy_components() _snake_case : Union[str, Any] = AudioLDMPipeline(**snake_case_ ) _snake_case : Dict = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : Any = audioldm_pipe.vocoder.config.sampling_rate _snake_case : Optional[int] = self.get_dummy_inputs(snake_case_ ) _snake_case : List[Any] = audioldm_pipe(audio_length_in_s=0.016 , **snake_case_ ) _snake_case : str = output.audios[0] assert audio.ndim == 1 assert len(snake_case_ ) / vocoder_sampling_rate == 0.016 _snake_case : Optional[int] = audioldm_pipe(audio_length_in_s=0.032 , **snake_case_ ) _snake_case : Union[str, Any] = output.audios[0] assert audio.ndim == 1 assert len(snake_case_ ) / vocoder_sampling_rate == 0.032 def lowerCamelCase__ ( self ): _snake_case : str = self.get_dummy_components() _snake_case : Union[str, Any] = AudioLDMPipeline(**snake_case_ ) _snake_case : Optional[int] = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : Any = ["hey"] _snake_case : Any = audioldm_pipe(snake_case_ , num_inference_steps=1 ) _snake_case : Any = output.audios.shape assert audio_shape == (1, 2_56) _snake_case : Optional[Any] = audioldm_pipe.vocoder.config 
config.model_in_dim *= 2 _snake_case : List[str] = SpeechTaHifiGan(snake_case_ ).to(snake_case_ ) _snake_case : List[Any] = audioldm_pipe(snake_case_ , num_inference_steps=1 ) _snake_case : List[Any] = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_56) def lowerCamelCase__ ( self ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ ) def lowerCamelCase__ ( self ): self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case_ ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ ) @slow class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self , snake_case_ , snake_case_="cpu" , snake_case_=torch.floataa , snake_case_=0 ): _snake_case : int = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _snake_case : int = np.random.RandomState(snake_case_ ).standard_normal((1, 8, 1_28, 16) ) _snake_case : str = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ) _snake_case : List[str] = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) _snake_case : Any = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : Any = self.get_inputs(snake_case_ ) _snake_case : Tuple = 25 _snake_case : Any = audioldm_pipe(**snake_case_ ).audios[0] assert audio.ndim == 1 assert len(snake_case_ ) == 8_19_20 _snake_case : Tuple = audio[7_72_30:7_72_40] _snake_case : Any = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) _snake_case : int = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def lowerCamelCase__ ( self ): _snake_case : str = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) _snake_case : Dict = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) _snake_case : Tuple = audioldm_pipe.to(snake_case_ ) audioldm_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : Any = self.get_inputs(snake_case_ ) _snake_case : str = audioldm_pipe(**snake_case_ ).audios[0] assert audio.ndim == 1 assert len(snake_case_ ) == 8_19_20 _snake_case : Any = audio[2_77_80:2_77_90] _snake_case : Dict = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) _snake_case : List[str] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
code_codestyle: 706

style_context:
"""simple docstring""" def a__ ( a : int ): """simple docstring""" if not isinstance(a , a ): raise TypeError("Input value must be an 'int' type" ) _snake_case : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 87
label: 0

----------------------------------------
code:
"""simple docstring""" import torch def a__ ( ): """simple docstring""" if torch.cuda.is_available(): _snake_case : int = torch.cuda.device_count() else: _snake_case : int = 0 print(f'Successfully ran on {num_gpus} GPUs' ) if __name__ == "__main__": main()
code_codestyle: 707

style_context:
"""simple docstring""" from __future__ import annotations import requests _a : List[str] = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def a__ ( a : str , a : int = 1 , a : str = "new" , a : list | None = None ): """simple docstring""" _snake_case : Any = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ): _snake_case : Optional[int] = f'Invalid search term: {invalid_search_terms}' raise ValueError(a ) _snake_case : int = requests.get( f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError _snake_case : Optional[Any] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(a )} _snake_case : Tuple = {} for id_ in range(a ): _snake_case : List[str] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
style_context_codestyle: 87
label: 0

----------------------------------------
code:
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging _a : Any = logging.get_logger(__name__) logging.set_verbosity_info() def a__ ( a : str , a : str ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: _snake_case : Optional[int] = XLMProphetNetForConditionalGenerationOld.from_pretrained(a ) _snake_case : Optional[int] = XLMProphetNetForConditionalGeneration.from_pretrained( a , output_loading_info=a ) else: _snake_case : Optional[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(a ) _snake_case : List[Any] = ProphetNetForConditionalGeneration.from_pretrained( a , output_loading_info=a ) _snake_case : Dict = ["key_proj", "value_proj", "query_proj"] _snake_case : Optional[Any] = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: _snake_case : Optional[int] = key.split("." ) if attributes[0] == "lm_head": _snake_case : Optional[int] = prophet _snake_case : List[str] = prophet_old else: _snake_case : int = prophet.prophetnet _snake_case : int = prophet_old.model _snake_case : Dict = False for attribute in attributes: if attribute in mapping: _snake_case : Tuple = mapping[attribute] if not hasattr(a , a ) and len(a ) > 0: _snake_case : Tuple = attribute elif hasattr(a , a ): _snake_case : List[Any] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _snake_case : List[str] = old_model.weight logger.info(f'{attribute} is initialized.' ) _snake_case : Optional[int] = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_snake_case : int = old_model.bias logger.info(f'{attribute} is initialized' ) _snake_case : Union[str, Any] = True break elif attribute in special_keys and hasattr(a , "in_proj_weight" ): _snake_case : List[Any] = old_model.in_proj_weight.shape[0] // 3 _snake_case : List[str] = getattr(a , a ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _snake_case : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _snake_case : List[str] = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _snake_case : Tuple = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _snake_case : str = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _snake_case : Optional[int] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _snake_case : Optional[int] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _snake_case : Union[str, Any] = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." _snake_case : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] ) _snake_case : Dict = True break if attribute.isdigit(): _snake_case : Optional[int] = model[int(a )] _snake_case : List[str] = old_model[int(a )] else: _snake_case : Optional[Any] = getattr(a , a ) if old_attribute == "": _snake_case : int = old_model else: if not hasattr(a , a ): raise ValueError(f'{old_model} does not have {old_attribute}' ) _snake_case : str = getattr(a , a ) if not is_key_init: raise ValueError(f'{key} was not correctly initialized!' ) print(f'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(a ) if __name__ == "__main__": _a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _a : int = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
code_codestyle: 708

style_context:
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def a__ ( a : float , a : float , a : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(a ), magnitude * sin(a )] return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )] def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ): """simple docstring""" _snake_case : NDArray[floataa] = cross(a , a ) _snake_case : float = sum(a ) return abs(a ) < eps if __name__ == "__main__": # Test to check if it works _a : Tuple = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _a : List[Any] = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) _a : List[Any] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]]) _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
style_context_codestyle: 87
label: 0

----------------------------------------
code:
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _a : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _a : Tuple = 250_004 _a : str = 250_020 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : str = MBartaaTokenizer __lowercase : List[Any] = MBartaaTokenizerFast __lowercase : Dict = True __lowercase : int = True def lowerCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _snake_case : List[str] = MBartaaTokenizer(snake_case_ , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=snake_case_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = "<s>" _snake_case : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(snake_case_ ) , 10_54 ) def lowerCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def lowerCamelCase__ ( self ): _snake_case : Any = MBartaaTokenizer(snake_case_ , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=snake_case_ ) _snake_case : Any = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) _snake_case : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( snake_case_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) _snake_case : Dict = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual( snake_case_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case_ ) self.assertListEqual( snake_case_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def lowerCamelCase__ ( self ): # fmt: off _snake_case : Tuple = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def lowerCamelCase__ ( self ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _snake_case : List[str] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _snake_case : Dict = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ ) _snake_case : List[str] = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ ) _snake_case : int = tempfile.mkdtemp() _snake_case : Union[str, Any] = tokenizer_r.save_pretrained(snake_case_ ) _snake_case : Union[str, Any] = tokenizer_p.save_pretrained(snake_case_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) _snake_case : Dict = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(snake_case_ , snake_case_ ) # Checks everything loads correctly in the same way _snake_case : Tuple = tokenizer_r.from_pretrained(snake_case_ ) _snake_case : Tuple = tokenizer_p.from_pretrained(snake_case_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case_ , snake_case_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case_ ) # Save tokenizer rust, legacy_format=True _snake_case : int = tempfile.mkdtemp() _snake_case : List[Any] = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ ) _snake_case : Any = tokenizer_p.save_pretrained(snake_case_ ) # Checks it save with the same files self.assertSequenceEqual(snake_case_ , snake_case_ ) # Checks everything loads correctly in the same way _snake_case : List[Any] = tokenizer_r.from_pretrained(snake_case_ ) _snake_case : Any = tokenizer_p.from_pretrained(snake_case_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case_ , snake_case_ ) ) shutil.rmtree(snake_case_ ) # Save tokenizer rust, legacy_format=False _snake_case : Tuple = tempfile.mkdtemp() _snake_case : Optional[int] = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ ) _snake_case : int = tokenizer_p.save_pretrained(snake_case_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _snake_case : List[str] = tokenizer_r.from_pretrained(snake_case_ ) _snake_case : List[str] = tokenizer_p.from_pretrained(snake_case_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case_ , snake_case_ ) ) shutil.rmtree(snake_case_ ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase): __lowercase : int = """facebook/mbart-large-50-one-to-many-mmt""" __lowercase : List[str] = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban 
Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] __lowercase : Any = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] __lowercase : List[Any] = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def lowerCamelCase__ ( cls ): _snake_case : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) _snake_case : Any = 1 return cls def lowerCamelCase__ ( self ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_00_38 ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case_ ) def lowerCamelCase__ ( self ): self.assertIn(snake_case_ , self.tokenizer.all_special_ids ) _snake_case : Optional[int] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] _snake_case : int = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) _snake_case : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertNotIn(self.tokenizer.eos_token , snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , snake_case_ ) _snake_case : Tuple = 10 _snake_case : Tuple = self.tokenizer(snake_case_ , max_length=snake_case_ , truncation=snake_case_ ).input_ids[0] self.assertEqual(ids[0] , snake_case_ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case_ ) , snake_case_ ) def lowerCamelCase__ ( self ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_53, 25_00_01] ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case_ ) _snake_case : List[str] = MBartaaTokenizer.from_pretrained(snake_case_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case_ ) @require_torch def lowerCamelCase__ ( self ): _snake_case : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" ) _snake_case : Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def lowerCamelCase__ ( self ): _snake_case : int = self.tokenizer( self.src_text , text_target=self.tgt_text , 
padding=snake_case_ , truncation=snake_case_ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) _snake_case : str = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case_ , snake_case_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _snake_case : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case_ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def lowerCamelCase__ ( self ): _snake_case : Dict = self.tokenizer(self.src_text , padding=snake_case_ , truncation=snake_case_ , max_length=3 , return_tensors="pt" ) _snake_case : Any = self.tokenizer( text_target=self.tgt_text , padding=snake_case_ , truncation=snake_case_ , max_length=10 , return_tensors="pt" ) _snake_case : List[Any] = targets["input_ids"] _snake_case : List[Any] = shift_tokens_right(snake_case_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(snake_case_ ) , { # en_XX, A, test, EOS "input_ids": [[25_00_04, 62, 30_34, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 25_00_01, } , )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Optional[int] = logging.get_logger(__name__) _a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class _UpperCAmelCase ( _snake_case): __lowercase : Optional[Any] = """openai-gpt""" __lowercase : Dict = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , snake_case_=4_04_78 , snake_case_=5_12 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_="cls_index" , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=0.1 , **snake_case_ , ): _snake_case : Tuple = vocab_size _snake_case : Dict = n_positions _snake_case : Any = n_embd _snake_case : Any = n_layer _snake_case : Optional[int] = n_head _snake_case : Union[str, Any] = afn _snake_case : Dict = resid_pdrop _snake_case : str = embd_pdrop _snake_case : Union[str, Any] = attn_pdrop _snake_case : str = layer_norm_epsilon _snake_case : Union[str, Any] = initializer_range _snake_case : Any = summary_type _snake_case : List[str] = summary_use_proj _snake_case : Optional[int] = summary_activation _snake_case : Union[str, Any] = summary_first_dropout _snake_case : Optional[int] = summary_proj_to_labels super().__init__(**snake_case_ )
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _a : Optional[Any] = logging.get_logger(__name__) _a : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } _a : List[str] = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def a__ ( a : Any , a : Optional[int] , a : Any , a : Dict , a : List[str] , a : Any ): """simple docstring""" for attribute in key.split("." ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models _snake_case : Dict = "lm_head" _snake_case : Union[str, Any] = getattr(a , a ) if weight_type is not None: _snake_case : List[str] = getattr(a , a ).shape else: _snake_case : Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _snake_case : Any = value elif weight_type == "weight_g": _snake_case : List[Any] = value elif weight_type == "weight_v": _snake_case : List[Any] = value elif weight_type == "bias": _snake_case : Tuple = value else: _snake_case : str = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def a__ ( a : str , a : Optional[int] , a : Any ): """simple docstring""" _snake_case : str = [] _snake_case : Dict = fairseq_model.state_dict() _snake_case : Dict = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): _snake_case : str = False if "conv_layers" in name: load_conv_layer( a , a , a , a , hf_model.config.feat_extract_norm == "group" , ) _snake_case : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): _snake_case : Union[str, Any] = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _snake_case : Any = True if "*" in mapped_key: _snake_case : Optional[Any] = name.split(a )[0].split("." 
)[-2] _snake_case : str = mapped_key.replace("*" , a ) if "weight_g" in name: _snake_case : List[Any] = "weight_g" elif "weight_v" in name: _snake_case : str = "weight_v" elif "bias" in name: _snake_case : List[str] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _snake_case : List[str] = "weight" else: _snake_case : str = None set_recursively(a , a , a , a , a , a ) continue if not is_used: unused_weights.append(a ) logger.warning(f'Unused weights: {unused_weights}' ) def a__ ( a : int , a : Any , a : int , a : str , a : str ): """simple docstring""" _snake_case : Optional[int] = full_name.split("conv_layers." )[-1] _snake_case : Dict = name.split("." ) _snake_case : Optional[Any] = int(items[0] ) _snake_case : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _snake_case : Tuple = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _snake_case : str = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) _snake_case : int = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) _snake_case : Any = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(a ) @torch.no_grad() def a__ ( a : Any , a : List[str] , a : int=None , a : Union[str, Any]=None , a : str=True ): """simple docstring""" if config_path is not None: _snake_case : Optional[int] = UniSpeechConfig.from_pretrained(a ) else: _snake_case : Optional[int] = UniSpeechConfig() if is_finetuned: if dict_path: _snake_case : Any = Dictionary.load_from_json(a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _snake_case : Optional[Any] = target_dict.pad_index _snake_case : List[Any] = target_dict.bos_index _snake_case : Union[str, Any] = target_dict.eos_index _snake_case : int = len(target_dict.symbols ) _snake_case : Any = os.path.join(a , "vocab.json" ) if not os.path.isdir(a ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(a ) ) return os.makedirs(a , exist_ok=a ) _snake_case : Any = target_dict.indices # fairseq has the <pad> and <s> switched _snake_case : Optional[int] = 42 _snake_case : List[Any] = 43 with open(a , "w" , encoding="utf-8" ) as vocab_handle: json.dump(a , a ) _snake_case : List[str] = WavaVecaPhonemeCTCTokenizer( a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=a , ) _snake_case : Optional[Any] = True if config.feat_extract_norm == "layer" else False _snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=a , return_attention_mask=a , ) _snake_case : Optional[Any] = WavaVecaProcessor(feature_extractor=a , tokenizer=a ) processor.save_pretrained(a ) _snake_case : Any = UniSpeechForCTC(a ) else: _snake_case : List[Any] = UniSpeechForPreTraining(a ) if is_finetuned: _snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} ) else: _snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _snake_case : Optional[int] = model[0].eval() recursively_load_weights(a , a , a ) hf_unispeech.save_pretrained(a ) if __name__ == "__main__": _a : Dict = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) _a : Optional[int] = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _a : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # 
convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def a__ ( a : List[str] , a : int , a : int ): """simple docstring""" _snake_case : Union[str, Any] = state_dict.pop(a ) _snake_case : Union[str, Any] = val def a__ ( a : Tuple ): """simple docstring""" _snake_case : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _snake_case : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _snake_case : Tuple = value else: _snake_case : Dict = value return new_state_dict def a__ ( a : int ): """simple docstring""" _snake_case : Any = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : int = in_proj_weight[:256, :] _snake_case : List[str] = in_proj_bias[:256] _snake_case : Optional[Any] = in_proj_weight[256:512, :] _snake_case : List[str] = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] _snake_case : Dict = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _snake_case : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Union[str, Any] = in_proj_weight[:256, :] _snake_case : Tuple = in_proj_bias[:256] _snake_case : int = in_proj_weight[256:512, :] _snake_case : int = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] _snake_case : str = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _snake_case : Dict = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) _snake_case : Optional[int] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention 
to the state dict _snake_case : Dict = in_proj_weight_cross_attn[:256, :] _snake_case : Any = in_proj_bias_cross_attn[:256] _snake_case : Union[str, Any] = in_proj_weight_cross_attn[256:512, :] _snake_case : Optional[int] = in_proj_bias_cross_attn[256:512] _snake_case : Any = in_proj_weight_cross_attn[-256:, :] _snake_case : str = in_proj_bias_cross_attn[-256:] def a__ ( a : str , a : int ): """simple docstring""" _snake_case , _snake_case : List[str] = image.size _snake_case : Dict = max(a , a ) _snake_case : Union[str, Any] = 800 if "detection" in checkpoint_url else 1_000 _snake_case : Any = target_max_size / current_max_size _snake_case : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def a__ ( a : str ): """simple docstring""" _snake_case : str = F.to_tensor(a ) _snake_case : Union[str, Any] = F.normalize(a , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ): """simple docstring""" logger.info("Converting model..." ) # load original state dict _snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(a , a , a ) _snake_case : Union[str, Any] = rename_backbone_keys(a ) # query, key and value matrices need special treatment read_in_q_k_v(a ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _snake_case : int = "model." for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _snake_case : Optional[int] = state_dict.pop(a ) _snake_case : Any = val # create HuggingFace model and load state dict _snake_case : Tuple = TableTransformerConfig( backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: _snake_case : Any = 15 _snake_case : int = 2 _snake_case : Optional[Any] = {0: "table", 1: "table rotated"} _snake_case : Union[str, Any] = idalabel _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} else: _snake_case : Any = 125 _snake_case : Union[str, Any] = 6 _snake_case : List[str] = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } _snake_case : Any = idalabel _snake_case : Optional[int] = {v: k for k, v in idalabel.items()} _snake_case : Union[str, Any] = DetrImageProcessor( format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 ) _snake_case : str = TableTransformerForObjectDetection(a ) model.load_state_dict(a ) model.eval() # verify our conversion _snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" _snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a ) _snake_case : Dict = Image.open(a ).convert("RGB" ) _snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 ) _snake_case : str = model(a ) if "detection" in checkpoint_url: _snake_case : int = (1, 15, 3) _snake_case : List[str] = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) _snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], 
[0.4716, 0.1760, 0.6364]] ) else: _snake_case : Union[str, Any] = (1, 125, 7) _snake_case : str = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) _snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) image_processor.save_pretrained(a ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..." ) _snake_case : int = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(a ) image_processor.push_to_hub(a ) if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : Any = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" def a__ ( a : Tuple , a : List[str] ): """simple docstring""" _snake_case : str = [0 for i in range(r + 1 )] # nc0 = 1 _snake_case : Optional[int] = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _snake_case : Tuple = min(a , a ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
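An illustrative way to call the helpers above and keep the result (requires live network access to the Hacker News API; the output filename is made up):

# Illustrative usage only: fetch a small batch and write it to a markdown file.
markdown = hackernews_top_stories_as_markdown(3)
with open("top_hn_stories.md", "w", encoding="utf-8") as handle:
    handle.write(markdown + "\n")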
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _a : Optional[int] = logging.get_logger(__name__) _a : List[str] = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class _UpperCAmelCase ( _snake_case , _snake_case): __lowercase : List[Any] = """convnextv2""" def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=4 , snake_case_=None , snake_case_=None , snake_case_="gelu" , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.0 , snake_case_=2_24 , snake_case_=None , snake_case_=None , **snake_case_ , ): super().__init__(**snake_case_ ) _snake_case : Tuple = num_channels _snake_case : Optional[int] = patch_size _snake_case : Tuple = num_stages _snake_case : int = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes _snake_case : str = [3, 3, 9, 3] if depths is None else depths _snake_case : int = hidden_act _snake_case : Tuple = initializer_range _snake_case : Union[str, Any] = layer_norm_eps _snake_case : Optional[int] = drop_path_rate _snake_case : Union[str, Any] = image_size _snake_case : List[Any] = ["stem"] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] _snake_case , _snake_case : Dict = get_aligned_output_features_output_indices( out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def a__ ( a : Namespace ): """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _a : int = """ transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. """ class _UpperCAmelCase ( _snake_case): @staticmethod def lowerCamelCase__ ( snake_case_ ): _snake_case : Dict = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=snake_case_ , required=snake_case_ , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=snake_case_ , required=snake_case_ , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=snake_case_ , required=snake_case_ , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=snake_case_ , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=snake_case_ , default=snake_case_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , ) train_parser.set_defaults(func=snake_case_ ) def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ , ): _snake_case : str = logging.get_logger("transformers-cli/converting" ) self._logger.info(F'Loading model {model_type}' ) _snake_case : Optional[int] = model_type _snake_case : Any = tf_checkpoint _snake_case : Optional[int] = pytorch_dump_output _snake_case : Tuple = config _snake_case : Tuple = finetuning_task_name def lowerCamelCase__ ( self ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from 
..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) if "ckpt" in self._tf_checkpoint.lower(): _snake_case : int = self._tf_checkpoint _snake_case : Optional[Any] = "" else: _snake_case : Optional[int] = self._tf_checkpoint _snake_case : List[str] = "" convert_transfo_xl_checkpoint_to_pytorch( snake_case_ , self._config , self._pytorch_dump_output , snake_case_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
from manim import * class _UpperCAmelCase ( _snake_case): def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Rectangle(height=0.5 , width=0.5 ) _snake_case : Any = Rectangle(height=0.25 , width=0.25 ) _snake_case : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _snake_case : str = [mem.copy() for i in range(6 )] _snake_case : Dict = [mem.copy() for i in range(6 )] _snake_case : str = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : Any = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : Tuple = Text("CPU" , font_size=24 ) _snake_case : Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(snake_case_ ) _snake_case : Tuple = [mem.copy() for i in range(4 )] _snake_case : List[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : Tuple = Text("GPU" , font_size=24 ) _snake_case : Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) gpu.move_to([-1, -1, 0] ) self.add(snake_case_ ) _snake_case : List[Any] = [mem.copy() for i in range(6 )] _snake_case : Union[str, Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : Dict = Text("Model" , font_size=24 ) _snake_case : int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) model.move_to([3, -1.0, 0] ) self.add(snake_case_ ) _snake_case : Union[str, Any] = [] _snake_case : List[Any] = [] _snake_case : Optional[int] = [] for i, rect in enumerate(snake_case_ ): rect.set_stroke(snake_case_ ) _snake_case : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=snake_case_ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case_ , buff=0.0 ) self.add(snake_case_ ) model_cpu_arr.append(snake_case_ ) self.add(*snake_case_ , *snake_case_ , *snake_case_ ) _snake_case : int = [mem.copy() for i in range(6 )] _snake_case : List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : List[str] = Text("Loaded Checkpoint" , font_size=24 ) _snake_case : Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) checkpoint.move_to([3, 0.5, 0] ) self.add(snake_case_ ) _snake_case : Optional[int] = [] _snake_case : List[Any] = [] for i, rect in enumerate(snake_case_ ): _snake_case : Optional[Any] = fill.copy().set_fill(snake_case_ , opacity=0.7 ) target.move_to(snake_case_ ) ckpt_arr.append(snake_case_ ) _snake_case : List[Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(snake_case_ ) self.add(*snake_case_ , *snake_case_ ) _snake_case : int = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _snake_case : str = MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(snake_case_ , snake_case_ ) _snake_case : Dict = MarkupText( F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(snake_case_ , DOWN * 2.4 , 
aligned_edge=key_text.get_left() ) self.add(snake_case_ ) _snake_case : List[Any] = MarkupText( F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , ) step_a.move_to([2, 2, 0] ) _snake_case : str = [meta_mem.copy() for i in range(6 )] _snake_case : Tuple = [meta_mem.copy() for i in range(6 )] _snake_case : List[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : Optional[Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 ) _snake_case : Any = Text("Disk" , font_size=24 ) _snake_case : Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(snake_case_ , run_time=3 ) , Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) ) _snake_case : Optional[int] = [] for i, rect in enumerate(snake_case_ ): _snake_case : str = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) ) self.play(*snake_case_ ) self.play(FadeOut(snake_case_ ) ) _snake_case : Union[str, Any] = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case_ , run_time=3 ) ) self.play( FadeOut(snake_case_ , snake_case_ , *snake_case_ , *snake_case_ ) , ) self.wait()
"""simple docstring""" import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def a__ ( a : List[str] , a : Any ): """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _snake_case : Any = flax_key_tuple[:-1] + ("weight",) _snake_case : str = torch.permute(a , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(a ): # linear layer _snake_case : Optional[int] = flax_key_tuple[:-1] + ("weight",) _snake_case : Any = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def a__ ( a : List[Any] , a : Union[str, Any] , a : List[str] ): """simple docstring""" if "metadata" in layer: _snake_case : Optional[int] = layer.split("metadata" ) _snake_case : Optional[int] = "".join(split_layer[0] )[:-1] _snake_case : int = [tuple(("metadata" + split_layer[1]).split("/" ) )] elif "kvstore" in layer: _snake_case : Any = layer.split("kvstore" ) _snake_case : str = "".join(split_layer[0] )[:-1] _snake_case : Any = [tuple(("kvstore" + split_layer[1]).split("/" ) )] else: _snake_case : List[Any] = layer.split("/" ) _snake_case : Tuple = "/".join(split_layer[:-1] ) _snake_case : int = (split_layer[-1],) if "kvstore/path" in layer: _snake_case : Optional[Any] = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: _snake_case : Tuple = "file" else: _snake_case : Optional[int] = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def a__ ( a : List[Any] , a : List[Any] ): """simple docstring""" _snake_case : Union[str, Any] = rename_keys(a ) _snake_case : int = {} for k, v in current_block.items(): _snake_case : Optional[int] = v _snake_case : Optional[int] = new_current_block torch.save(a , a ) def a__ ( a : Dict , a : Tuple , a : List[str] , a : int , a : str = WEIGHTS_NAME ): """simple docstring""" _snake_case : Any = convert_file_size_to_int(a ) _snake_case : Tuple = [] _snake_case : Optional[int] = {} _snake_case : Tuple = 0 _snake_case : Optional[Any] = 0 os.makedirs(a , exist_ok=a ) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp: _snake_case : Any = serialization.msgpack_restore(fp.read() )["optimizer"]["target"] _snake_case : Optional[Any] = flatten_dict(a , sep="/" ) _snake_case : Optional[Any] = {} for layer in checkpoint_info.keys(): _snake_case , _snake_case , _snake_case : int = get_key_and_tensorstore_dict( a , a , a ) if curr_real_layer_name in all_layers: _snake_case : Dict = content else: _snake_case : Tuple = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _snake_case : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _snake_case : Dict = torch.tensor(a ) _snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _snake_case , _snake_case : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , a ) _snake_case : Optional[Any] = "/".join(a ) # If this weight 
is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: _snake_case : Any = os.path.join( a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) del current_block _snake_case : List[Any] = {} _snake_case : str = 0 _snake_case : List[str] = raw_weights.to(getattr(a , a ) ) current_block_size += weight_size total_size += weight_size # Add the last block _snake_case : int = os.path.join(a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(a ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _snake_case : str = {} _snake_case : Any = {} for idx, shard in enumerate(a ): _snake_case : Optional[int] = weights_name.replace( ".bin" , f'-{idx+1:05d}-of-{len(a ):05d}.bin' ) # len(sharded_state_dicts):05d} _snake_case : Dict = os.path.join(a , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(a , os.path.join(a , a ) ) _snake_case : Dict = shard for key in shard: _snake_case : int = shard_file # Add the metadata _snake_case : List[Any] = {"total_size": total_size} _snake_case : Any = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(a , a ) , "w" , encoding="utf-8" ) as f: _snake_case : Union[str, Any] = json.dumps(a , indent=2 , sort_keys=a ) + "\n" f.write(a ) return metadata, index if __name__ == "__main__": _a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""") parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""", type=str, required=False, help="""Path to the output pytorch model.""", ) _a : Optional[int] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def a__ ( ): """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _snake_case : List[str] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" ) config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" ) _snake_case : str = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" ) _snake_case : List[Any] = TaTokenizer.from_pretrained("t5-small" ) _snake_case : Optional[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." _snake_case : Dict = tokenizer(a , return_tensors="pt" ).input_ids _snake_case : List[Any] = model.generate(a , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
"""simple docstring""" from collections.abc import Generator from math import sin def a__ ( a : bytes ): """simple docstring""" if len(a ) != 32: raise ValueError("Input must be of length 32" ) _snake_case : Tuple = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def a__ ( a : int ): """simple docstring""" if i < 0: raise ValueError("Input must be non-negative" ) _snake_case : Optional[Any] = format(a , "08x" )[-8:] _snake_case : List[str] = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def a__ ( a : bytes ): """simple docstring""" _snake_case : Union[str, Any] = b"" for char in message: bit_string += format(a , "08b" ).encode("utf-8" ) _snake_case : Dict = format(len(a ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(a ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def a__ ( a : bytes ): """simple docstring""" if len(a ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(a ) , 512 ): _snake_case : Optional[Any] = bit_string[pos : pos + 512] _snake_case : Union[str, Any] = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def a__ ( a : int ): """simple docstring""" if i < 0: raise ValueError("Input must be non-negative" ) _snake_case : int = format(a , "032b" ) _snake_case : Optional[Any] = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(a , 2 ) def a__ ( a : int , a : int ): """simple docstring""" return (a + b) % 2**32 def a__ ( a : int , a : int ): """simple docstring""" if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def a__ ( a : bytes ): """simple docstring""" _snake_case : str = preprocess(a ) _snake_case : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states _snake_case : int = 0X67452301 _snake_case : int = 0Xefcdab89 _snake_case : Optional[int] = 0X98badcfe _snake_case : List[str] = 0X10325476 _snake_case : List[Any] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(a ): _snake_case : Tuple = aa _snake_case : Dict = ba _snake_case : Union[str, Any] = ca _snake_case : str = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f _snake_case : List[str] = d ^ (b & (c ^ d)) _snake_case : int = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f _snake_case : List[Any] = c ^ (d & (b ^ c)) _snake_case : Optional[Any] = (5 * i + 1) % 16 elif i <= 47: _snake_case : Union[str, Any] = b ^ c ^ d _snake_case : str = (3 * i + 5) % 16 else: _snake_case : Union[str, Any] = c ^ (b | not_aa(a )) _snake_case : str = (7 * i) % 16 _snake_case : Dict = (f + a + added_consts[i] + block_words[g]) % 2**32 _snake_case : List[str] = d _snake_case : Any = c _snake_case : int = b _snake_case : Optional[int] = sum_aa(a , left_rotate_aa(a , shift_amounts[i] ) ) # Add hashed chunk to 
running total _snake_case : Optional[int] = sum_aa(a , a ) _snake_case : int = sum_aa(a , a ) _snake_case : Optional[Any] = sum_aa(a , a ) _snake_case : Any = sum_aa(a , a ) _snake_case : List[str] = reformat_hex(a ) + reformat_hex(a ) + reformat_hex(a ) + reformat_hex(a ) return digest if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase): __lowercase : Dict = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __lowercase : Optional[Any] = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Optional[int] = False def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_=False ): _snake_case : Union[str, Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if return_labels: if model_class in get_values(snake_case_ ): _snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ): _snake_case : Optional[Any] = parent _snake_case : List[Any] = batch_size _snake_case : Optional[int] = seq_length _snake_case : Dict = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : List[Any] = use_token_type_ids _snake_case : int = use_labels _snake_case : Dict = vocab_size _snake_case : Tuple = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Optional[Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : Tuple = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : str = max_position_embeddings _snake_case : str = type_vocab_size _snake_case : Any = type_sequence_label_size _snake_case : Optional[int] = initializer_range _snake_case : List[Any] = num_labels _snake_case : Optional[int] = num_choices _snake_case : Optional[int] = scope _snake_case : Any = embedding_size def lowerCamelCase__ ( self ): _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) 
_snake_case : Optional[Any] = None if self.use_input_mask: _snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : List[str] = None if self.use_token_type_ids: _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case : Dict = None _snake_case : Tuple = None _snake_case : str = None if self.use_labels: _snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _snake_case : Tuple = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Dict = TFMobileBertModel(config=snake_case_ ) _snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) _snake_case : Union[str, Any] = [input_ids, input_mask] _snake_case : Optional[Any] = model(snake_case_ ) _snake_case : Dict = model(snake_case_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : List[Any] = TFMobileBertForMaskedLM(config=snake_case_ ) _snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[str] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ ) _snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Tuple = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = TFMobileBertForPreTraining(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , 
snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = self.num_labels _snake_case : str = TFMobileBertForSequenceClassification(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Any = self.num_choices _snake_case : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ ) _snake_case : List[Any] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : List[str] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : int = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _snake_case : Optional[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = self.num_labels _snake_case : Optional[int] = TFMobileBertForTokenClassification(config=snake_case_ ) _snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = TFMobileBertForQuestionAnswering(config=snake_case_ ) _snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Tuple = config_and_inputs _snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def lowerCamelCase__ ( self ): _snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self ) _snake_case : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ ) @slow def lowerCamelCase__ ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_tf class _UpperCAmelCase ( unittest.TestCase): @slow def lowerCamelCase__ ( self ): _snake_case : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" ) _snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) _snake_case : Union[str, Any] = model(snake_case_ )[0] _snake_case : int = [1, 6, 3_05_22] self.assertEqual(output.shape , snake_case_ ) _snake_case : Optional[Any] = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
87
0
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _UpperCAmelCase ( unittest.TestCase): @property def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : str = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model @property def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : List[str] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , ) return model @property def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Tuple = self.dummy_uncond_unet _snake_case : List[Any] = DDIMScheduler() _snake_case : Union[str, Any] = self.dummy_vq_model _snake_case : Optional[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ ) ldm.to(snake_case_ ) ldm.set_progress_bar_config(disable=snake_case_ ) _snake_case : List[str] = torch.manual_seed(0 ) _snake_case : int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="numpy" ).images _snake_case : str = torch.manual_seed(0 ) _snake_case : List[str] = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="numpy" , return_dict=snake_case_ )[0] _snake_case : Dict = image[0, -3:, -3:, -1] _snake_case : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case : str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] ) _snake_case : str = 1E-2 if torch_device != "mps" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : Tuple = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" ) ldm.to(snake_case_ ) ldm.set_progress_bar_config(disable=snake_case_ ) _snake_case : Dict = torch.manual_seed(0 ) _snake_case : Tuple = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="numpy" ).images _snake_case : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) _snake_case : str = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] ) _snake_case : Optional[Any] = 1E-2 if torch_device != "mps" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _a : List[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["""BartphoTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _UpperCAmelCase ( pl.LightningModule): def __init__( self , snake_case_ ): super().__init__() _snake_case : Optional[int] = model _snake_case : Optional[int] = 2 _snake_case : List[Any] = nn.Linear(self.model.config.hidden_size , self.num_labels ) def lowerCamelCase__ ( self ): pass def a__ ( a : str , a : str , a : str ): """simple docstring""" _snake_case : List[str] = LongformerModel.from_pretrained(a ) _snake_case : Union[str, Any] = LightningModel(a ) _snake_case : Optional[Any] = torch.load(a , map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model _snake_case : Union[str, Any] = LongformerForQuestionAnswering.from_pretrained(a ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(a ) print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' ) if __name__ == "__main__": _a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--longformer_model""", default=None, type=str, required=True, help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""", ) parser.add_argument( """--longformer_question_answering_ckpt_path""", default=None, type=str, required=True, help="""Path the official PyTorch Lightning Checkpoint.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _a : Tuple = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
717
"""simple docstring""" def a__ ( a : list , a : int , a : int = 0 , a : int = 0 ): """simple docstring""" _snake_case : Optional[int] = right or len(a ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(a , a , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup _a : int = logging.get_logger(__name__) class _UpperCAmelCase ( _snake_case): def __init__( self , **snake_case_ ): requires_backends(self , ["bs4"] ) super().__init__(**snake_case_ ) def lowerCamelCase__ ( self , snake_case_ ): _snake_case : List[Any] = [] _snake_case : str = [] _snake_case : Optional[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag _snake_case : List[str] = parent.find_all(child.name , recursive=snake_case_ ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(snake_case_ ) else next(i for i, s in enumerate(snake_case_ , 1 ) if s is child ) ) _snake_case : str = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def lowerCamelCase__ ( self , snake_case_ ): _snake_case : Any = BeautifulSoup(snake_case_ , "html.parser" ) _snake_case : Dict = [] _snake_case : Union[str, Any] = [] _snake_case : Dict = [] for element in html_code.descendants: if type(snake_case_ ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue _snake_case : Optional[Any] = html.unescape(snake_case_ ).strip() if not text_in_this_tag: continue all_doc_strings.append(snake_case_ ) _snake_case : Union[str, Any] = self.xpath_soup(snake_case_ ) stringaxtag_seq.append(snake_case_ ) stringaxsubs_seq.append(snake_case_ ) if len(snake_case_ ) != len(snake_case_ ): raise ValueError("Number of doc strings and xtags does not correspond" ) if len(snake_case_ ) != len(snake_case_ ): raise ValueError("Number of doc strings and xsubs does not correspond" ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def lowerCamelCase__ ( self , snake_case_ , snake_case_ ): _snake_case : Tuple = "" for tagname, subs in zip(snake_case_ , snake_case_ ): xpath += F'/{tagname}' if subs != 0: xpath += F'[{subs}]' return xpath def __call__( self , snake_case_ ): _snake_case : Optional[Any] = False # Check that strings has a valid type if isinstance(snake_case_ , snake_case_ ): _snake_case : str = True elif isinstance(snake_case_ , (list, tuple) ): if len(snake_case_ ) == 0 or isinstance(html_strings[0] , snake_case_ ): _snake_case : Optional[Any] = True if not valid_strings: raise ValueError( "HTML strings must of type `str`, `List[str]` (batch of examples), " F'but is of type {type(snake_case_ )}.' ) _snake_case : Union[str, Any] = bool(isinstance(snake_case_ , (list, tuple) ) and (isinstance(html_strings[0] , snake_case_ )) ) if not is_batched: _snake_case : str = [html_strings] # Get nodes + xpaths _snake_case : str = [] _snake_case : List[Any] = [] for html_string in html_strings: _snake_case : List[str] = self.get_three_from_single(snake_case_ ) nodes.append(snake_case_ ) _snake_case : str = [] for node, tag_list, sub_list in zip(snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = self.construct_xpath(snake_case_ , snake_case_ ) xpath_strings.append(snake_case_ ) xpaths.append(snake_case_ ) # return as Dict _snake_case : List[str] = {"nodes": nodes, "xpaths": xpaths} _snake_case : int = BatchFeature(data=snake_case_ , tensor_type=snake_case_ ) return encoded_inputs
718
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self , snake_case_ , snake_case_ ): _snake_case , _snake_case : Dict = text, pattern _snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ ) def lowerCamelCase__ ( self , snake_case_ ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self , snake_case_ ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self ): # searches pattern in text and returns index positions _snake_case : List[str] = [] for i in range(self.textLen - self.patLen + 1 ): _snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ ) if mismatch_index == -1: positions.append(snake_case_ ) else: _snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] ) _snake_case : Tuple = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _a : List[Any] = """ABAABA""" _a : str = """AB""" _a : List[Any] = BoyerMooreSearch(text, pattern) _a : Any = bms.bad_character_heuristic() if len(positions) == 0: print("""No match found""") else: print("""Pattern found in following positions: """) print(positions)
87
0
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _a : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # 
convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def a__ ( a : List[str] , a : int , a : int ): """simple docstring""" _snake_case : Union[str, Any] = state_dict.pop(a ) _snake_case : Union[str, Any] = val def a__ ( a : Tuple ): """simple docstring""" _snake_case : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _snake_case : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _snake_case : Tuple = value else: _snake_case : Dict = value return new_state_dict def a__ ( a : int ): """simple docstring""" _snake_case : Any = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : int = in_proj_weight[:256, :] _snake_case : List[str] = in_proj_bias[:256] _snake_case : Optional[Any] = in_proj_weight[256:512, :] _snake_case : List[str] = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] _snake_case : Dict = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _snake_case : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Union[str, Any] = in_proj_weight[:256, :] _snake_case : Tuple = in_proj_bias[:256] _snake_case : int = in_proj_weight[256:512, :] _snake_case : int = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] _snake_case : str = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _snake_case : Dict = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) _snake_case : Optional[int] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention 
to the state dict _snake_case : Dict = in_proj_weight_cross_attn[:256, :] _snake_case : Any = in_proj_bias_cross_attn[:256] _snake_case : Union[str, Any] = in_proj_weight_cross_attn[256:512, :] _snake_case : Optional[int] = in_proj_bias_cross_attn[256:512] _snake_case : Any = in_proj_weight_cross_attn[-256:, :] _snake_case : str = in_proj_bias_cross_attn[-256:] def a__ ( a : str , a : int ): """simple docstring""" _snake_case : List[str] = image.size _snake_case : Dict = max(a , a ) _snake_case : Union[str, Any] = 800 if "detection" in checkpoint_url else 1_000 _snake_case : Any = target_max_size / current_max_size _snake_case : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def a__ ( a : str ): """simple docstring""" _snake_case : str = F.to_tensor(a ) _snake_case : Union[str, Any] = F.normalize(a , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ): """simple docstring""" logger.info("Converting model..." ) # load original state dict _snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(a , a , a ) _snake_case : Union[str, Any] = rename_backbone_keys(a ) # query, key and value matrices need special treatment read_in_q_k_v(a ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _snake_case : int = "model." for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _snake_case : Optional[int] = state_dict.pop(a ) _snake_case : Any = val # create HuggingFace model and load state dict _snake_case : Tuple = TableTransformerConfig( backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: _snake_case : Any = 15 _snake_case : int = 2 _snake_case : Optional[Any] = {0: "table", 1: "table rotated"} _snake_case : Union[str, Any] = idalabel _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} else: _snake_case : Any = 125 _snake_case : Union[str, Any] = 6 _snake_case : List[str] = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } _snake_case : Any = idalabel _snake_case : Optional[int] = {v: k for k, v in idalabel.items()} _snake_case : Union[str, Any] = DetrImageProcessor( format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 ) _snake_case : str = TableTransformerForObjectDetection(a ) model.load_state_dict(a ) model.eval() # verify our conversion _snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" _snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a ) _snake_case : Dict = Image.open(a ).convert("RGB" ) _snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 ) _snake_case : str = model(a ) if "detection" in checkpoint_url: _snake_case : int = (1, 15, 3) _snake_case : List[str] = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) _snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 
0.1760, 0.6364]] ) else: _snake_case : Union[str, Any] = (1, 125, 7) _snake_case : str = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) _snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) image_processor.save_pretrained(a ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..." ) _snake_case : int = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(a ) image_processor.push_to_hub(a ) if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : Any = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
719
"""simple docstring""" from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": _a : Dict = input("""Enter image url: """).strip() print(f'Downloading image from {url} ...') _a : str = BeautifulSoup(requests.get(url).content, """html.parser""") # The image URL is in the content field of the first meta tag with property og:image _a : str = soup.find("""meta""", {"""property""": """og:image"""})["""content"""] _a : Dict = requests.get(image_url).content _a : str = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg' with open(file_name, """wb""") as fp: fp.write(image_data) print(f'Done. Image saved to disk as {file_name}.')
87
0
"""simple docstring""" class _UpperCAmelCase : def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[Any] = name _snake_case : List[str] = value _snake_case : Optional[Any] = weight def __repr__( self ): return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def lowerCamelCase__ ( self ): return self.value def lowerCamelCase__ ( self ): return self.name def lowerCamelCase__ ( self ): return self.weight def lowerCamelCase__ ( self ): return self.value / self.weight def a__ ( a : Optional[Any] , a : List[str] , a : Union[str, Any] ): """simple docstring""" _snake_case : Union[str, Any] = [] for i in range(len(a ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def a__ ( a : Optional[int] , a : Union[str, Any] , a : Dict ): """simple docstring""" _snake_case : str = sorted(a , key=a , reverse=a ) _snake_case : str = [] _snake_case : Union[str, Any] = 0.0, 0.0 for i in range(len(a ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def a__ ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
720
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : Optional[int] = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("""Googling.....""") _a : List[Any] = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:]) _a : Optional[int] = requests.get(url, headers={"""UserAgent""": UserAgent().random}) # res.raise_for_status() with open("""project1a.html""", """wb""") as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) _a : Optional[int] = BeautifulSoup(res.text, """html.parser""") _a : Tuple = list(soup.select(""".eZt8xd"""))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("""href""")) else: webbrowser.open(f'https://google.com{link.get("href")}')
721
"""simple docstring""" import argparse import json import subprocess def a__ ( a : Optional[Any] , a : Optional[int] ): """simple docstring""" _snake_case : str = [] _snake_case : Optional[Any] = ( f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"' " https://api.github.com/repos/huggingface/transformers/actions/runners" ) _snake_case : Dict = subprocess.run(a , shell=a , stdout=subprocess.PIPE ) _snake_case : Tuple = output.stdout.decode("utf-8" ) _snake_case : List[str] = json.loads(a ) _snake_case : Any = status["runners"] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(a ) # save the result so we can report them on Slack with open("offline_runners.txt" , "w" ) as fp: fp.write(json.dumps(a ) ) if len(a ) > 0: _snake_case : Any = "\n".join([x["name"] for x in offline_runners] ) raise ValueError(f'The following runners are offline:\n{failed}' ) if __name__ == "__main__": def a__ ( a : Optional[int] ): """simple docstring""" return values.split("," ) _a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--target_runners""", default=None, type=list_str, required=True, help="""Comma-separated list of runners to check status.""", ) parser.add_argument( """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission.""" ) _a : List[str] = parser.parse_args() get_runner_status(args.target_runners, args.token)
87
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
88
'''simple docstring''' import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = """▁""" UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BigBirdTokenizer lowerCamelCase_ = BigBirdTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = True def lowerCamelCase_ ( self : Any ): '''simple docstring''' super().setUp() lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''<s>''' lowercase : int =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(UpperCAmelCase__ ) , 1004 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase : Optional[int] =self.get_tokenizer() lowercase : Any =self.get_rust_tokenizer() lowercase : int ='''I was born in 92000, and this is falsé.''' lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ ) lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[Any] =self.get_rust_tokenizer() lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) lowercase : Tuple =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , ) lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowercase : Any 
=tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def lowerCamelCase_ ( self : str ): '''simple docstring''' return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str ='''Hello World!''' lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : int =( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase : Dict =''' '''.join(UpperCAmelCase__ ) lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Dict =self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' ) lowercase : Dict =BigBirdModel(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' # fmt: off lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 
452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
88
1
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib UpperCamelCase_ = get_logger() UpperCamelCase_ = None class __SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Tuple=None , **UpperCAmelCase__ : Dict ): '''simple docstring''' super().__init__(features=UpperCAmelCase__ ) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError( F'''Expected {device} to be a `str` not {type(UpperCAmelCase__ )}, as `jaxlib.xla_extension.Device` ''' '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''' ) lowercase : Dict =device if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: lowercase : Optional[Any] =self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( F'''Device with string identifier {self.device} not listed among the available ''' F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' F'''device: {str(jax.devices()[0] )}.''' ) lowercase : List[Any] =str(jax.devices()[0] ) lowercase : Optional[Any] =jnp_array_kwargs @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' import jax return {str(UpperCAmelCase__ ): device for device in jax.devices()} def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[Any] ): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and column: if all( isinstance(UpperCAmelCase__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(UpperCAmelCase__ , axis=0 ) return column def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(UpperCAmelCase__ , (str, bytes, type(UpperCAmelCase__ )) ): return value elif isinstance(UpperCAmelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() lowercase : Dict ={} if isinstance(UpperCAmelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: lowercase : Tuple ={'''dtype''': jnp.intaa} else: lowercase : Optional[int] ={'''dtype''': jnp.intaa} elif isinstance(UpperCAmelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): lowercase : Union[str, Any] ={'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase : int =np.asarray(UpperCAmelCase__ ) # using global variable since 
`jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: lowercase : int =self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(UpperCAmelCase__ , **{**default_dtype, **self.jnp_array_kwargs} ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase__ , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(UpperCAmelCase__ , '''__array__''' ) and not isinstance(UpperCAmelCase__ , jax.Array ): lowercase : Any =data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase__ , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase__ ) for substruct in data_struct] ) elif isinstance(UpperCAmelCase__ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCAmelCase__ ) for substruct in data_struct] ) return self._tensorize(UpperCAmelCase__ ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : dict ): '''simple docstring''' return map_nested(self._recursive_tensorize , UpperCAmelCase__ , map_list=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : pa.Table ): '''simple docstring''' lowercase : Union[str, Any] =self.numpy_arrow_extractor().extract_row(UpperCAmelCase__ ) lowercase : str =self.python_features_decoder.decode_row(UpperCAmelCase__ ) return self.recursive_tensorize(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : pa.Table ): '''simple docstring''' lowercase : str =self.numpy_arrow_extractor().extract_column(UpperCAmelCase__ ) lowercase : Dict =self.python_features_decoder.decode_column(UpperCAmelCase__ , pa_table.column_names[0] ) lowercase : str =self.recursive_tensorize(UpperCAmelCase__ ) lowercase : Tuple =self._consolidate(UpperCAmelCase__ ) return column def lowerCamelCase_ ( self : str , UpperCAmelCase__ : pa.Table ): '''simple docstring''' lowercase : int =self.numpy_arrow_extractor().extract_batch(UpperCAmelCase__ ) lowercase : Optional[int] =self.python_features_decoder.decode_batch(UpperCAmelCase__ ) lowercase : List[str] =self.recursive_tensorize(UpperCAmelCase__ ) for column_name in batch: lowercase : Optional[int] =self._consolidate(batch[column_name] ) return batch
88
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
88
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """WavLMForAudioFrameClassification""", """WavLMForCTC""", """WavLMForSequenceClassification""", """WavLMForXVector""", """WavLMModel""", """WavLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
'''simple docstring'''
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding counter
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
88
1
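A short, non-interactive usage sketch for the anagram check above; the counting idea is restated compactly here so the example runs on its own.

# Usage sketch for the anagram check (compact restatement of the same counting idea).
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip().replace(" ", "")
    second_str = second_str.lower().strip().replace(" ", "")
    if len(first_str) != len(second_str):
        return False
    count: defaultdict[str, int] = defaultdict(int)
    for a, b in zip(first_str, second_str):
        count[a] += 1
        count[b] -= 1
    return all(value == 0 for value in count.values())


assert check_anagrams("Silent night", "Listen thing") is True
assert check_anagrams("apple", "papel") is True
assert check_anagrams("hello", "world") is False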
'''simple docstring'''
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU / swish activation: x * sigmoid(x)
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
88
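A small worked example for the sigmoid / SiLU functions above, using the same NumPy formulation:

# Worked example: sigmoid and SiLU (x * sigmoid(x)) on a small vector.
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
sig = 1 / (1 + np.exp(-x))
silu = x * sig

print(sig)   # approx [0.2689, 0.5, 0.7311]
print(silu)  # approx [-0.2689, 0.0, 0.7311]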
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = None lowerCamelCase_ = BloomTokenizerFast lowerCamelCase_ = BloomTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 'tokenizer_file' lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' super().setUp() lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : str =self.get_rust_tokenizer() lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids'''] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input lowercase : Tuple ='''This is a simple input''' lowercase : int =['''This is a simple input 1''', '''This is a simple input 2'''] lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''') lowercase : int =[ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) lowercase : Optional[int] =None # Hotfixing padding = None self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises( UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) # Pair input self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises(UpperCAmelCase__ , 
tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises( UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_rust_tokenizer() lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ ) lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data lowercase : int =list(sample_data.values() ) lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) ) lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
88
1
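The round trip exercised by the test above can be reproduced directly. This sketch assumes `transformers` is installed and that the `bigscience/tokenizer` files can be downloaded:

# Usage sketch (assumes transformers is installed and the checkpoint can be downloaded).
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tokenizer("The quick brown fox</s>")["input_ids"]
print(ids)
print(tokenizer.decode(ids))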
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'wav2vec2' def __init__( self : List[Any] , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : Union[str, Any]=3072 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Optional[Any]=1E-5 , UpperCAmelCase__ : List[Any]="group" , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase__ : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase__ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[str]=128 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=0.05 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : List[str]=10 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[str]=320 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=100 , UpperCAmelCase__ : Optional[Any]=256 , UpperCAmelCase__ : List[Any]=256 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Dict="sum" , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Union[str, Any]=256 , UpperCAmelCase__ : Dict=(512, 512, 512, 512, 1500) , UpperCAmelCase__ : Optional[int]=(5, 3, 3, 1, 1) , UpperCAmelCase__ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Tuple=None , **UpperCAmelCase__ : Optional[Any] , ): '''simple docstring''' super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ ) lowercase : Union[str, Any] =hidden_size lowercase : Optional[int] =feat_extract_norm lowercase : Optional[int] =feat_extract_activation lowercase : Dict =list(UpperCAmelCase__ ) lowercase : str =list(UpperCAmelCase__ ) lowercase : str =list(UpperCAmelCase__ ) lowercase : List[Any] =conv_bias lowercase : List[Any] =num_conv_pos_embeddings lowercase : Dict =num_conv_pos_embedding_groups lowercase : Optional[int] =len(self.conv_dim ) lowercase : Any =num_hidden_layers lowercase : int =intermediate_size lowercase : List[str] =hidden_act lowercase : Dict =num_attention_heads lowercase : Tuple =hidden_dropout lowercase : Optional[int] =attention_dropout lowercase : Tuple =activation_dropout lowercase : str =feat_proj_dropout lowercase : int =final_dropout lowercase : Optional[Any] =layerdrop lowercase : 
Union[str, Any] =layer_norm_eps lowercase : List[str] =initializer_range lowercase : str =vocab_size lowercase : Optional[Any] =do_stable_layer_norm lowercase : List[Any] =use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase : List[Any] =apply_spec_augment lowercase : Optional[Any] =mask_time_prob lowercase : Union[str, Any] =mask_time_length lowercase : Optional[int] =mask_time_min_masks lowercase : Union[str, Any] =mask_feature_prob lowercase : Any =mask_feature_length lowercase : int =mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase : Optional[int] =num_codevectors_per_group lowercase : int =num_codevector_groups lowercase : Optional[Any] =contrastive_logits_temperature lowercase : str =feat_quantizer_dropout lowercase : List[str] =num_negatives lowercase : int =codevector_dim lowercase : Any =proj_codevector_dim lowercase : List[Any] =diversity_loss_weight # ctc loss lowercase : Tuple =ctc_loss_reduction lowercase : Optional[Any] =ctc_zero_infinity # adapter lowercase : Dict =add_adapter lowercase : str =adapter_kernel_size lowercase : Optional[Any] =adapter_stride lowercase : Union[str, Any] =num_adapter_layers lowercase : Union[str, Any] =output_hidden_size or hidden_size lowercase : Dict =adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase : Union[str, Any] =classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase : List[str] =list(UpperCAmelCase__ ) lowercase : List[str] =list(UpperCAmelCase__ ) lowercase : Dict =list(UpperCAmelCase__ ) lowercase : Optional[int] =xvector_output_dim @property def lowerCamelCase_ ( self : Any ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
88
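The final property of the configuration above multiplies the convolutional strides together, giving the feature extractor's overall downsampling factor. With the default strides the result is 320, i.e. one output frame per 320 input samples (20 ms at 16 kHz); the plain-Python check below needs no transformers install:

# The default conv_stride of (5, 2, 2, 2, 2, 2, 2) gives an overall downsampling factor of 320:
# one output frame per 320 input samples (20 ms at 16 kHz).
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320
assert ratio == 320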
'''simple docstring'''
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
88
1
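A worked example for the real/reactive power helpers above: with an apparent power of 100 VA and a power factor of 0.8, the real power is 80 W and the reactive power is about 60 var.

# Worked example: S = 100 VA, power factor = 0.8 -> P = 80 W, Q ~= 60 var.
import math

apparent_power = 100.0
power_factor = 0.8

real_power = apparent_power * power_factor
reactive_power = apparent_power * math.sqrt(1 - power_factor**2)

print(real_power)      # 80.0
print(reactive_power)  # ~60.0 (up to floating point)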
'''simple docstring''' import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> Optional[Any]: assert isinstance(__magic_name__ , __magic_name__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Dict ) -> Union[str, Any]: lowercase : str =tmp_path / '''cache''' lowercase : int ={'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase : List[Any] =TextDatasetReader(__magic_name__ , cache_dir=__magic_name__ , keep_in_memory=__magic_name__ ).read() _check_text_dataset(__magic_name__ , __magic_name__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> str: lowercase : Union[str, Any] =tmp_path / '''cache''' lowercase : str ={'''text''': '''string'''} lowercase : int =features.copy() if features else default_expected_features lowercase : Tuple =( Features({feature: Value(__magic_name__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase : List[Any] =TextDatasetReader(__magic_name__ , features=__magic_name__ , cache_dir=__magic_name__ ).read() _check_text_dataset(__magic_name__ , __magic_name__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Any: lowercase : Tuple =tmp_path / '''cache''' lowercase : Any ={'''text''': '''string'''} lowercase : List[str] =TextDatasetReader(__magic_name__ , cache_dir=__magic_name__ , split=__magic_name__ ).read() _check_text_dataset(__magic_name__ , __magic_name__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int ) -> Union[str, Any]: if issubclass(__magic_name__ , __magic_name__ ): lowercase : Tuple =text_path elif issubclass(__magic_name__ , __magic_name__ ): lowercase : List[Any] =[text_path] lowercase : Tuple =tmp_path / '''cache''' lowercase : Optional[int] ={'''text''': '''string'''} lowercase : List[str] =TextDatasetReader(__magic_name__ , cache_dir=__magic_name__ ).read() _check_text_dataset(__magic_name__ , __magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Dict=("train",) ) -> Tuple: assert isinstance(__magic_name__ , __magic_name__ ) for split in splits: lowercase : Tuple =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def 
_lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : str ) -> int: lowercase : Tuple =tmp_path / '''cache''' lowercase : str ={'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase : Optional[Any] =TextDatasetReader({'''train''': text_path} , cache_dir=__magic_name__ , keep_in_memory=__magic_name__ ).read() _check_text_datasetdict(__magic_name__ , __magic_name__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : Any ) -> Dict: lowercase : Optional[int] =tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowercase : List[Any] ={'''text''': '''string'''} lowercase : List[Any] =features.copy() if features else default_expected_features lowercase : Union[str, Any] =( Features({feature: Value(__magic_name__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase : Dict =TextDatasetReader({'''train''': text_path} , features=__magic_name__ , cache_dir=__magic_name__ ).read() _check_text_datasetdict(__magic_name__ , __magic_name__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Tuple ) -> List[Any]: if split: lowercase : List[Any] ={split: text_path} else: lowercase : Optional[int] ='''train''' lowercase : str ={'''train''': text_path, '''test''': text_path} lowercase : Dict =tmp_path / '''cache''' lowercase : Tuple ={'''text''': '''string'''} lowercase : Optional[int] =TextDatasetReader(__magic_name__ , cache_dir=__magic_name__ ).read() _check_text_datasetdict(__magic_name__ , __magic_name__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
88
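These tests exercise the same reader that backs the public `load_dataset("text", ...)` entry point. A minimal usage sketch (assumes the `datasets` library is installed; the file path is illustrative):

# Minimal sketch of the public API the tests above exercise
# (assumes the `datasets` library is installed; the file path is illustrative).
from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "my_file.txt"}, split="train")
print(dataset.column_names)  # ['text'] -- one row per line of the file
print(dataset[0])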
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ): '''simple docstring''' warnings.warn( '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
88
1
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "geglu" , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "layer_norm" , UpperCAmelCase__ : bool = False , ): '''simple docstring''' super().__init__() lowercase : int =only_cross_attention lowercase : Tuple =(num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' lowercase : Optional[Any] =(num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: lowercase : Union[str, Any] =AdaLayerNorm(UpperCAmelCase__ , UpperCAmelCase__ ) elif self.use_ada_layer_norm_zero: lowercase : List[Any] =AdaLayerNormZero(UpperCAmelCase__ , UpperCAmelCase__ ) else: lowercase : Tuple =nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ ) lowercase : List[Any] =Attention( query_dim=UpperCAmelCase__ , heads=UpperCAmelCase__ , dim_head=UpperCAmelCase__ , dropout=UpperCAmelCase__ , bias=UpperCAmelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCAmelCase__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. lowercase : List[Any] =( AdaLayerNorm(UpperCAmelCase__ , UpperCAmelCase__ ) if self.use_ada_layer_norm else nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ ) ) lowercase : Union[str, Any] =Attention( query_dim=UpperCAmelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCAmelCase__ , dim_head=UpperCAmelCase__ , dropout=UpperCAmelCase__ , bias=UpperCAmelCase__ , upcast_attention=UpperCAmelCase__ , ) # is self-attn if encoder_hidden_states is none else: lowercase : Union[str, Any] =None lowercase : Optional[Any] =None # 3. 
Feed-forward lowercase : Dict =nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ ) lowercase : str =FeedForward(UpperCAmelCase__ , dropout=UpperCAmelCase__ , activation_fn=UpperCAmelCase__ , final_dropout=UpperCAmelCase__ ) # let chunk size default to None lowercase : List[str] =None lowercase : List[str] =0 def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' # Sets chunk feed-forward lowercase : Optional[Any] =chunk_size lowercase : Optional[Any] =dim def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.LongTensor] = None , UpperCAmelCase__ : Dict[str, Any] = None , UpperCAmelCase__ : Optional[torch.LongTensor] = None , ): '''simple docstring''' # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: lowercase : Dict =self.norma(UpperCAmelCase__ , UpperCAmelCase__ ) elif self.use_ada_layer_norm_zero: lowercase , lowercase , lowercase , lowercase , lowercase : str =self.norma( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hidden_dtype=hidden_states.dtype ) else: lowercase : Tuple =self.norma(UpperCAmelCase__ ) lowercase : List[str] =cross_attention_kwargs if cross_attention_kwargs is not None else {} lowercase : Optional[Any] =self.attna( UpperCAmelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) if self.use_ada_layer_norm_zero: lowercase : Optional[Any] =gate_msa.unsqueeze(1 ) * attn_output lowercase : Any =attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: lowercase : str =( self.norma(UpperCAmelCase__ , UpperCAmelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCAmelCase__ ) ) lowercase : int =self.attna( UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowercase : Tuple =attn_output + hidden_states # 3. Feed-forward lowercase : List[str] =self.norma(UpperCAmelCase__ ) if self.use_ada_layer_norm_zero: lowercase : Union[str, Any] =norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) lowercase : Any =norm_hidden_states.shape[self._chunk_dim] // self._chunk_size lowercase : List[str] =torch.cat( [self.ff(UpperCAmelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCAmelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: lowercase : str =self.ff(UpperCAmelCase__ ) if self.use_ada_layer_norm_zero: lowercase : int =gate_mlp.unsqueeze(1 ) * ff_output lowercase : Any =ff_output + hidden_states return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : str = "geglu" , UpperCAmelCase__ : bool = False , ): '''simple docstring''' super().__init__() lowercase : List[Any] =int(dim * mult ) lowercase : Optional[int] =dim_out if dim_out is not None else dim if activation_fn == "gelu": lowercase : List[Any] =GELU(UpperCAmelCase__ , UpperCAmelCase__ ) if activation_fn == "gelu-approximate": lowercase : int =GELU(UpperCAmelCase__ , UpperCAmelCase__ , approximate='''tanh''' ) elif activation_fn == "geglu": lowercase : Optional[int] =GEGLU(UpperCAmelCase__ , UpperCAmelCase__ ) elif activation_fn == "geglu-approximate": lowercase : Tuple =ApproximateGELU(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[Any] =nn.ModuleList([] ) # project in self.net.append(UpperCAmelCase__ ) # project dropout self.net.append(nn.Dropout(UpperCAmelCase__ ) ) # project out self.net.append(nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int ): '''simple docstring''' for module in self.net: lowercase : List[Any] =module(UpperCAmelCase__ ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "none" ): '''simple docstring''' super().__init__() lowercase : str =nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Union[str, Any] =approximate def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(UpperCAmelCase__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' lowercase : Optional[int] =self.proj(UpperCAmelCase__ ) lowercase : Optional[Any] =self.gelu(UpperCAmelCase__ ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ): '''simple docstring''' super().__init__() lowercase : List[Any] =nn.Linear(UpperCAmelCase__ , dim_out * 2 ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[Any] ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(UpperCAmelCase__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase , lowercase : Union[str, Any] =self.proj(UpperCAmelCase__ ).chunk(2 , dim=-1 ) return hidden_states * 
self.gelu(UpperCAmelCase__ ) class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : int ): '''simple docstring''' super().__init__() lowercase : Tuple =nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Any =self.proj(UpperCAmelCase__ ) return x * torch.sigmoid(1.7_02 * x ) class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' super().__init__() lowercase : Tuple =nn.Embedding(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Dict =nn.SiLU() lowercase : int =nn.Linear(UpperCAmelCase__ , embedding_dim * 2 ) lowercase : List[Any] =nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : List[Any] =self.linear(self.silu(self.emb(UpperCAmelCase__ ) ) ) lowercase , lowercase : Any =torch.chunk(UpperCAmelCase__ , 2 ) lowercase : Tuple =self.norm(UpperCAmelCase__ ) * (1 + scale) + shift return x class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' super().__init__() lowercase : int =CombinedTimestepLabelEmbeddings(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Any =nn.SiLU() lowercase : Optional[Any] =nn.Linear(UpperCAmelCase__ , 6 * embedding_dim , bias=UpperCAmelCase__ ) lowercase : Optional[int] =nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ , eps=1E-6 ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str=None ): '''simple docstring''' lowercase : Union[str, Any] =self.linear(self.silu(self.emb(UpperCAmelCase__ , UpperCAmelCase__ , hidden_dtype=UpperCAmelCase__ ) ) ) lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : int =emb.chunk(6 , dim=1 ) lowercase : Dict =self.norm(UpperCAmelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : float = 1E-5 ): '''simple docstring''' super().__init__() lowercase : Tuple =num_groups lowercase : List[Any] =eps if act_fn is None: lowercase : Tuple =None else: lowercase : Optional[int] =get_activation(UpperCAmelCase__ ) lowercase : Optional[Any] =nn.Linear(UpperCAmelCase__ , out_dim * 2 ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' if self.act: lowercase : List[Any] =self.act(UpperCAmelCase__ ) lowercase : Dict =self.linear(UpperCAmelCase__ ) lowercase : int =emb[:, :, None, None] lowercase , lowercase : List[Any] =emb.chunk(2 , dim=1 ) lowercase : str =F.group_norm(UpperCAmelCase__ , self.num_groups , eps=self.eps ) lowercase : int =x * (1 + scale) + shift return x
88
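The GEGLU feed-forward activation defined above projects to twice the output width, splits the result in half, and gates one half with GELU of the other. A stand-alone PyTorch sketch of that gating (torch assumed installed; dimensions are illustrative):

# Stand-alone sketch of GEGLU gating: project to 2x width, split, gate with GELU.
# Assumes torch is installed; dimensions are illustrative.
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyGEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, gate = self.proj(x).chunk(2, dim=-1)
        return hidden * F.gelu(gate)


x = torch.randn(2, 8, 16)          # (batch, seq, dim)
print(TinyGEGLU(16, 32)(x).shape)  # torch.Size([2, 8, 32])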
'''simple docstring''' import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") UpperCamelCase_ = parser.parse_args() if args.model_type == "roberta": UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name) UpperCamelCase_ = """roberta""" elif args.model_type == "gpt2": UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name) UpperCamelCase_ = """transformer""" UpperCamelCase_ = model.state_dict() UpperCamelCase_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight''' UpperCamelCase_ = state_dict[param_name] for w in ["weight", "bias"]: UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}''' UpperCamelCase_ = state_dict[param_name] # Transformer Blocks # UpperCamelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: UpperCamelCase_ = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}'''] UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}'''] UpperCamelCase_ = state_dict["""lm_head.weight"""] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
88
1
'''simple docstring'''
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
88
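This is the classic digit-sum-of-100! exercise (Project Euler 20). A quick cross-check against `math.factorial`:

# Cross-check: digit sum of 100! computed via math.factorial.
import math

digit_sum = sum(int(d) for d in str(math.factorial(100)))
print(digit_sum)  # 648
assert digit_sum == 648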
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict: for param in module.parameters(): lowercase : List[str] =False def _lowerCAmelCase ( ) -> List[str]: lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase : Optional[int] ='''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str: lowercase : Optional[int] =plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def _lowerCAmelCase ( ) -> List[Any]: lowercase : Any =datetime.now() lowercase : Dict =current_time.strftime('''%H:%M:%S''' ) return timestamp
88
1
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
88
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _lowerCAmelCase ( ) -> List[Any]: lowercase : Tuple =HfArgumentParser(__magic_name__ ) lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0] lowercase : Any =TensorFlowBenchmark(args=__magic_name__ ) try: lowercase : List[Any] =parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.''' lowercase : Any =''' '''.join(str(__magic_name__ ).split(''' ''' )[:-1] ) lowercase : Optional[Any] ='''''' lowercase : List[str] =eval(str(__magic_name__ ).split(''' ''' )[-1] ) lowercase : Optional[Any] =[] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__magic_name__ ) if len(__magic_name__ ) > 0: lowercase : int =full_error_msg + begin_error_msg + str(__magic_name__ ) raise ValueError(__magic_name__ ) benchmark.run() if __name__ == "__main__": main()
88
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""", # See all REALM models at https://huggingface.co/models?filter=realm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'realm' def __init__( self : List[Any] , UpperCAmelCase__ : Union[str, Any]=30522 , UpperCAmelCase__ : Optional[Any]=768 , UpperCAmelCase__ : int=128 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : List[Any]=3072 , UpperCAmelCase__ : Dict="gelu_new" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=1E-12 , UpperCAmelCase__ : Optional[Any]=256 , UpperCAmelCase__ : List[str]=10 , UpperCAmelCase__ : Tuple=1E-3 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : Optional[int]=320 , UpperCAmelCase__ : Optional[Any]=13353718 , UpperCAmelCase__ : List[str]=5000 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : str=2 , **UpperCAmelCase__ : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) # Common config lowercase : str =vocab_size lowercase : Tuple =max_position_embeddings lowercase : str =hidden_size lowercase : List[str] =retriever_proj_size lowercase : int =num_hidden_layers lowercase : List[str] =num_attention_heads lowercase : Any =num_candidates lowercase : List[str] =intermediate_size lowercase : int =hidden_act lowercase : str =hidden_dropout_prob lowercase : List[Any] =attention_probs_dropout_prob lowercase : Optional[Any] =initializer_range lowercase : List[str] =type_vocab_size lowercase : Union[str, Any] =layer_norm_eps # Reader config lowercase : Optional[int] =span_hidden_size lowercase : Optional[Any] =max_span_width lowercase : Dict =reader_layer_norm_eps lowercase : Any =reader_beam_size lowercase : List[Any] =reader_seq_len # Retrieval config lowercase : Dict =num_block_records lowercase : Any =searcher_beam_size
88
'''simple docstring'''
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
88
1
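A usage sketch for the backtracking maze solver above, with 0 marking open cells and 1 marking walls; the routine is restated compactly here so the example runs on its own.

# Usage sketch for the backtracking solver (0 = open cell, 1 = wall).
# Compact restatement of the same algorithm so the example is self-contained.
def solve(maze, i=0, j=0, path=None):
    size = len(maze)
    if path is None:
        path = [[0] * size for _ in range(size)]
    if not (0 <= i < size and 0 <= j < size) or maze[i][j] or path[i][j]:
        return None
    path[i][j] = 1
    if i == j == size - 1:
        return path
    for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
        if solve(maze, i + di, j + dj, path) is not None:
            return path
    path[i][j] = 0
    return None


maze = [
    [0, 1, 0, 1],
    [0, 0, 0, 1],
    [1, 1, 0, 0],
    [1, 1, 1, 0],
]
for row in solve(maze):
    print(row)
# Prints the cells on the found path:
# [1, 0, 0, 0]
# [1, 1, 1, 0]
# [0, 0, 1, 1]
# [0, 0, 0, 1]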
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , UpperCAmelCase__ : List[Any] , ): '''simple docstring''' lowercase : List[str] =parent lowercase : str =13 lowercase : Optional[int] =7 lowercase : Optional[int] =True lowercase : Any =True lowercase : List[Any] =False lowercase : Optional[Any] =True lowercase : Dict =99 lowercase : Union[str, Any] =32 lowercase : List[str] =2 lowercase : Union[str, Any] =4 lowercase : int =37 lowercase : Tuple ='''gelu''' lowercase : Any =0.1 lowercase : List[str] =0.1 lowercase : Dict =512 lowercase : Optional[Any] =16 lowercase : List[str] =2 lowercase : str =0.02 lowercase : int =3 lowercase : int =4 lowercase : Tuple =None def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_input_mask: lowercase : str =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : List[str] =None lowercase : Optional[Any] =None lowercase : str =None if self.use_labels: lowercase : List[str] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Any =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Optional[int] =DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' lowercase : int =TFDistilBertModel(config=UpperCAmelCase__ ) lowercase : Dict ={'''input_ids''': input_ids, '''attention_mask''': input_mask} lowercase : List[str] =model(UpperCAmelCase__ ) lowercase : str =[input_ids, input_mask] lowercase : str =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : str =TFDistilBertForMaskedLM(config=UpperCAmelCase__ ) lowercase : List[Any] ={'''input_ids''': input_ids, 
'''attention_mask''': input_mask} lowercase : int =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' lowercase : Dict =TFDistilBertForQuestionAnswering(config=UpperCAmelCase__ ) lowercase : Optional[Any] ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, } lowercase : Any =model(UpperCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Union[str, Any] =self.num_labels lowercase : Tuple =TFDistilBertForSequenceClassification(UpperCAmelCase__ ) lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask} lowercase : List[str] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =self.num_choices lowercase : Optional[Any] =TFDistilBertForMultipleChoice(UpperCAmelCase__ ) lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) lowercase : str =tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) lowercase : Optional[int] ={ '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, } lowercase : List[Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' lowercase : Optional[int] =self.num_labels lowercase : Dict =TFDistilBertForTokenClassification(UpperCAmelCase__ ) lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask} lowercase : Dict =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[int] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : List[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) lowerCamelCase_ = ( { 
'feature-extraction': TFDistilBertModel, 'fill-mask': TFDistilBertForMaskedLM, 'question-answering': TFDistilBertForQuestionAnswering, 'text-classification': TFDistilBertForSequenceClassification, 'token-classification': TFDistilBertForTokenClassification, 'zero-shot': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Union[str, Any] =TFDistilBertModelTester(self ) lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , dim=37 ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Dict ): '''simple docstring''' for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): lowercase : Tuple =TFDistilBertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Tuple =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] ) lowercase : Any =model(UpperCAmelCase__ )[0] lowercase : List[Any] =[1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : int =tf.constant( [ [ [0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99], [0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04], [0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 )
88
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowercase : Any =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) @torch.no_grad() def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ): '''simple docstring''' # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ): lowercase : Optional[int] =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCAmelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowercase : Dict =self.scheduler.step( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 ) lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase__ )
88
1
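A usage sketch for the DDIM pipeline above. It assumes `diffusers` and `torch` are installed and that the example checkpoint (`google/ddpm-cifar10-32`, chosen here purely for illustration) can be downloaded:

# Usage sketch (assumes diffusers + torch are installed and the example
# checkpoint can be downloaded; the model id is illustrative).
import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# eta=0.0 gives deterministic DDIM sampling, with far fewer steps than the full DDPM schedule.
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")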
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCamelCase_ = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import copy def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]: lowercase : int ={} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: lowercase : List[str] =[] _list.append([line.split()[1], line.split()[2]] ) lowercase : Tuple =_list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: lowercase : List[Any] =[] _list.append([line.split()[0], line.split()[2]] ) lowercase : Union[str, Any] =_list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str: with open(__magic_name__ ) as f: lowercase : Optional[int] =f.read(1 ) lowercase : List[Any] =start_node lowercase : List[Any] =[] lowercase : str =start_node lowercase : str =0 while visiting not in first_solution: lowercase : Optional[int] =10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: lowercase : List[Any] =k[1] lowercase : str =k[0] first_solution.append(__magic_name__ ) lowercase : Any =distance_of_first_solution + int(__magic_name__ ) lowercase : Optional[int] =best_node first_solution.append(__magic_name__ ) lowercase : str =0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 lowercase : str =( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple: lowercase : Tuple =[] for n in solution[1:-1]: lowercase : Dict =solution.index(__magic_name__ ) for kn in solution[1:-1]: lowercase : Tuple =solution.index(__magic_name__ ) if n == kn: continue lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ ) lowercase : Optional[int] =kn lowercase : List[Any] =n lowercase : List[Any] =0 for k in _tmp[:-1]: lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: lowercase : Optional[int] =distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]: lowercase : str =1 lowercase : List[Any] =first_solution lowercase : Any =[] lowercase : str =distance_of_first_solution lowercase : str =solution while count <= iters: lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ ) lowercase : Dict =0 lowercase : int =neighborhood[index_of_best_solution] lowercase : Optional[int] =len(__magic_name__ ) - 1 lowercase : List[Any] =False while not found: lowercase : List[Any] =0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: lowercase : List[str] =best_solution[i] lowercase : Dict =solution[i] break lowercase : Any =i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) lowercase : str =True 
lowercase : int =best_solution[:-1] lowercase : Any =neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: lowercase : Optional[int] =cost lowercase : str =solution else: lowercase : Optional[int] =index_of_best_solution + 1 lowercase : List[Any] =neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) lowercase : Optional[int] =count + 1 return best_solution_ever, best_cost def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple: lowercase : List[str] =generate_neighbours(args.File ) lowercase , lowercase : Optional[Any] =generate_first_solution( args.File , __magic_name__ ) lowercase , lowercase : int =tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
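# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# The tabu-search script above expects a whitespace-separated edge list, one edge per
# line: "<node> <node> <distance>". Node labels should be single characters, because
# generate_first_solution() picks the start node with f.read(1). The file name
# "tsp_data.txt" and the tiny fully connected graph below are made up for this example.
example_edges = """a b 20
a c 18
a d 22
b c 30
b d 26
c d 24
"""

with open("tsp_data.txt", "w") as handle:
    handle.write(example_edges)

# Equivalent command line for the argparse interface defined above:
#   python tabu_search.py -f tsp_data.txt -i 100 -s 5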
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'rwkv' lowerCamelCase_ = {'max_position_embeddings': 'context_length'} def __init__( self : List[str] , UpperCAmelCase__ : Tuple=50277 , UpperCAmelCase__ : Dict=1024 , UpperCAmelCase__ : Tuple=4096 , UpperCAmelCase__ : Dict=32 , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=1E-5 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Any=True , **UpperCAmelCase__ : str , ): '''simple docstring''' lowercase : Optional[int] =vocab_size lowercase : Optional[int] =context_length lowercase : str =hidden_size lowercase : int =num_hidden_layers lowercase : int =attention_hidden_size if attention_hidden_size is not None else hidden_size lowercase : Tuple =intermediate_size if intermediate_size is not None else 4 * hidden_size lowercase : Optional[Any] =layer_norm_epsilon lowercase : Optional[int] =rescale_every lowercase : Optional[Any] =use_cache lowercase : Optional[int] =bos_token_id lowercase : List[Any] =eos_token_id super().__init__( tie_word_embeddings=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
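# --- Hedged example (added for illustration; not part of the original file) ---
# Sketch of how an RWKV configuration like the class above is used, assuming it
# corresponds to transformers' public RwkvConfig. The tiny sizes are arbitrary; unset
# fields fall back to the defaults implemented above (attention_hidden_size ->
# hidden_size, intermediate_size -> 4 * hidden_size).
from transformers import RwkvConfig

config = RwkvConfig(vocab_size=1000, context_length=128, hidden_size=64, num_hidden_layers=2)
print(config.attention_hidden_size, config.intermediate_size)  # 64 256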
# Totient summation: counts the reduced proper fractions n/d with d <= limit by summing
# Euler's totient phi(d) for 2 <= d <= limit, using an odd-only prime sieve and the
# product formula phi(n) = n * prod(1 - 1/p) over the prime factors p of n.
def solution(limit: int = 1000000) -> int:
    # sieve of odd primes up to limit (2 is added separately)
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi[n] starts at n and is multiplied by (1 - 1/p) for each prime p dividing n
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f'''{solution() = }''')
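# --- Hedged sanity check (added for illustration; not part of the original file) ---
# Brute-force cross-check of the sieve-based totient sum above for a small limit.
# Counting reduced proper fractions n/d with d <= 8 directly gives 21, the value
# solution(8) should return when run in the same module as the function above.
from math import gcd


def brute_force(limit: int) -> int:
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)


assert brute_force(8) == 21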
'''simple docstring''' from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __SCREAMING_SNAKE_CASE ( lowercase__ ): @slow @require_torch def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Optional[int] =EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) lowercase : str =BertTokenizer.from_pretrained('''bert-base-uncased''' ) lowercase : List[str] =bertabert.config.encoder.vocab_size lowercase : Tuple =tokenizer.sep_token_id lowercase : Tuple =tokenizer.cls_token_id lowercase : Optional[int] =128 lowercase : List[str] =datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) lowercase : List[Any] =datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) lowercase : List[str] =train_dataset.select(range(32 ) ) lowercase : Optional[Any] =val_dataset.select(range(16 ) ) lowercase : Optional[int] =4 def _map_to_encoder_decoder_inputs(UpperCAmelCase__ : str ): # Tokenizer will automatically set [BOS] <text> [EOS] lowercase : Tuple =tokenizer(batch['''article'''] , padding='''max_length''' , truncation=UpperCAmelCase__ , max_length=512 ) lowercase : Union[str, Any] =tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=UpperCAmelCase__ , max_length=128 ) lowercase : Optional[int] =inputs.input_ids lowercase : List[Any] =inputs.attention_mask lowercase : List[Any] =outputs.input_ids lowercase : Optional[int] =outputs.input_ids.copy() lowercase : List[Any] =[ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] lowercase : Dict =outputs.attention_mask assert all(len(UpperCAmelCase__ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase__ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase__ : Optional[int] ): lowercase : Optional[Any] =pred.label_ids lowercase : Union[str, Any] =pred.predictions # all unnecessary tokens are removed lowercase : Union[str, Any] =tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) lowercase : str =sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase__ ) )] ) / len(UpperCAmelCase__ ) return {"accuracy": accuracy} # map train dataset lowercase : Union[str, Any] =train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset lowercase : List[str] =val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) lowercase : str =self.get_auto_remove_tmp_dir() lowercase : int =SeqaSeqTrainingArguments( output_dir=UpperCAmelCase__ , per_device_train_batch_size=UpperCAmelCase__ , 
per_device_eval_batch_size=UpperCAmelCase__ , predict_with_generate=UpperCAmelCase__ , evaluation_strategy='''steps''' , do_train=UpperCAmelCase__ , do_eval=UpperCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowercase : Union[str, Any] =SeqaSeqTrainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , ) # start training trainer.train()
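# --- Hedged sketch (added for illustration; not part of the original test) ---
# The test above fine-tunes a warm-started bert2bert model with Seq2SeqTrainer. This is
# a minimal generation-only sketch with the same tiny checkpoints; the input text and
# decoding settings are arbitrary, and an untrained decoder will not produce a useful
# summary -- the point is only the warm-start + generate plumbing.
from transformers import BertTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

inputs = tokenizer("A short article to summarize.", return_tensors="pt")
summary_ids = model.generate(inputs.input_ids, max_length=16)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))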
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BioGptTokenizer lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase : List[str] =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Dict ='''lower newer''' lowercase : str ='''lower newer''' return input_text, output_text def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file ) lowercase : Any ='''lower''' lowercase : int =['''low''', '''er</w>'''] lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[int] =tokens + ['''<unk>'''] lowercase : Any =[14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ ) lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ ) lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
'''simple docstring''' import heapq import sys import numpy as np UpperCamelCase_ = tuple[int, int] class __SCREAMING_SNAKE_CASE : def __init__( self : Dict ): '''simple docstring''' lowercase : Dict =[] lowercase : Tuple =set() def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return len(self.elements ) == 0 def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str ): '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(UpperCAmelCase__ ) else: # update # print("update", item) lowercase : List[Any] =[] ((lowercase) , (lowercase)) : str =heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((lowercase) , (lowercase)) : List[Any] =heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : int ): '''simple docstring''' if item in self.set: self.set.remove(UpperCAmelCase__ ) lowercase : Union[str, Any] =[] ((lowercase) , (lowercase)) : List[Any] =heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((lowercase) , (lowercase)) : Union[str, Any] =heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' return self.elements[0][1] def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' ((lowercase) , (lowercase)) : str =heapq.heappop(self.elements ) self.set.remove(UpperCAmelCase__ ) return (priority, item) def _lowerCAmelCase ( __magic_name__ : TPos , __magic_name__ : TPos ) -> int: # euclidean distance lowercase : Tuple =np.array(__magic_name__ ) lowercase : Dict =np.array(__magic_name__ ) return np.linalg.norm(a - b ) def _lowerCAmelCase ( __magic_name__ : TPos , __magic_name__ : TPos ) -> Dict: # integer division by time variable return consistent_heuristic(__magic_name__ , __magic_name__ ) // t def _lowerCAmelCase ( __magic_name__ : TPos , __magic_name__ : TPos ) -> List[Any]: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def _lowerCAmelCase ( __magic_name__ : TPos , __magic_name__ : int , __magic_name__ : TPos , __magic_name__ : dict[TPos, float] ) -> int: lowercase : Tuple =g_function[start] + Wa * heuristics[i](__magic_name__ , __magic_name__ ) return ans def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : int ) -> Optional[Any]: lowercase : List[str] =np.chararray((n, n) ) for i in range(__magic_name__ ): for j in range(__magic_name__ ): lowercase : int ='''*''' for i in range(__magic_name__ ): for j in range(__magic_name__ ): if (j, (n - 1) - i) in blocks: lowercase : Dict ='''#''' lowercase : List[Any] ='''-''' lowercase : Dict =back_pointer[goal] while x != start: ((lowercase) , (lowercase)) : str =x # print(x) lowercase : List[str] ='''-''' lowercase : int =back_pointer[x] lowercase : Union[str, Any] ='''-''' for i in range(__magic_name__ ): for j in range(__magic_name__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) lowercase : 
List[Any] =back_pointer[goal] while x != start: print(__magic_name__ , end=''' ''' ) lowercase : Dict =back_pointer[x] print(__magic_name__ ) sys.exit() def _lowerCAmelCase ( __magic_name__ : TPos ) -> int: if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : Optional[int] , ) -> List[str]: for itera in range(__magic_name__ ): open_list[itera].remove_element(__magic_name__ ) # print("s", s) # print("j", j) ((lowercase) , (lowercase)) : str =s lowercase : Union[str, Any] =(x - 1, y) lowercase : Dict =(x + 1, y) lowercase : int =(x, y + 1) lowercase : str =(x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(__magic_name__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(__magic_name__ ) lowercase : Optional[int] =-1 lowercase : Optional[Any] =float('''inf''' ) if valid(__magic_name__ ) and g_function[neighbours] > g_function[s] + 1: lowercase : Optional[int] =g_function[s] + 1 lowercase : List[Any] =s if neighbours not in close_list_anchor: open_list[0].put(__magic_name__ , key(__magic_name__ , 0 , __magic_name__ , __magic_name__ ) ) if neighbours not in close_list_inad: for var in range(1 , __magic_name__ ): if key(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) <= Wa * key( __magic_name__ , 0 , __magic_name__ , __magic_name__ ): open_list[j].put( __magic_name__ , key(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) ) def _lowerCAmelCase ( ) -> Optional[Any]: lowercase : str =[] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list UpperCamelCase_ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} UpperCamelCase_ = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] UpperCamelCase_ = make_common_ground() UpperCamelCase_ = blocks_blk # hyper parameters UpperCamelCase_ = 1 UpperCamelCase_ = 1 UpperCamelCase_ = 20 UpperCamelCase_ = 3 # one consistent and two other inconsistent # start and end destination UpperCamelCase_ = (0, 0) UpperCamelCase_ = (n - 1, n - 1) UpperCamelCase_ = 1 def _lowerCAmelCase ( __magic_name__ : TPos , __magic_name__ : TPos , __magic_name__ : int ) -> List[Any]: lowercase : Any ={start: 0, goal: float('''inf''' )} lowercase : Optional[int] ={start: -1, goal: -1} lowercase : Any =[] lowercase : Union[str, Any] =set() for i in range(__magic_name__ ): open_list.append(PriorityQueue() ) open_list[i].put(__magic_name__ , key(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) ) lowercase : list[int] =[] lowercase : list[int] =[] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , __magic_name__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): 
do_something(__magic_name__ , __magic_name__ , __magic_name__ ) else: lowercase , lowercase : Tuple =open_list[i].top_show() visited.add(__magic_name__ ) expand_state( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) close_list_inad.append(__magic_name__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(__magic_name__ , __magic_name__ , __magic_name__ ) else: lowercase : Union[str, Any] =open_list[0].top_show() visited.add(__magic_name__ ) expand_state( __magic_name__ , 0 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) close_list_anchor.append(__magic_name__ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(__magic_name__ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
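# --- Hedged sanity check (added for illustration; not part of the original file) ---
# Self-contained re-computation of the two simplest heuristics defined above for the
# grid configured in the script (start=(0, 0), goal=(19, 19) on a 20x20 grid).
import numpy as np

start_pos, goal_pos = (0, 0), (19, 19)
euclidean = float(np.linalg.norm(np.array(start_pos) - np.array(goal_pos)))    # ~26.87
manhattan = abs(start_pos[0] - goal_pos[0]) + abs(start_pos[1] - goal_pos[1])  # 38
print(euclidean, manhattan)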
'''simple docstring''' import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ): '''simple docstring''' lowercase : int =parent lowercase : List[str] =batch_size lowercase : str =seq_length lowercase : Optional[Any] =is_training lowercase : Union[str, Any] =use_attention_mask lowercase : Optional[Any] =use_token_type_ids lowercase : Tuple =use_labels lowercase : List[str] =vocab_size lowercase : List[str] =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Any =num_attention_heads lowercase : List[str] =intermediate_size lowercase : Optional[Any] =hidden_act lowercase : Dict =hidden_dropout_prob lowercase : List[Any] =attention_probs_dropout_prob lowercase : Optional[Any] =max_position_embeddings lowercase : Tuple =type_vocab_size lowercase : Optional[int] =type_sequence_label_size lowercase : Optional[Any] =initializer_range lowercase : Optional[int] =num_choices def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =None if self.use_attention_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Tuple =None if self.use_token_type_ids: lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : int =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : 
List[str] =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs lowercase : List[str] =True lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = True lowerCamelCase_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =FlaxRobertaModelTester(self ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ ) lowercase : List[Any] =model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase__ )
# Convert an image to its negative by inverting every pixel value.
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
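# --- Hedged alternative (added for illustration; not part of the original file) ---
# The per-pixel loop above can be replaced by one vectorized expression; for 8-bit
# images the two approaches produce identical results. The random image here stands in
# for "image_data/lena.jpg".
import numpy as np

sample = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
negative = 255 - sample
assert np.array_equal(negative, np.array([255, 255, 255]) - sample)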
'''simple docstring''' import mpmath # for roots of unity import numpy as np class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ): '''simple docstring''' # Input as list lowercase : Optional[int] =list(poly_a or [0] )[:] lowercase : Optional[Any] =list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() lowercase : Any =len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() lowercase : Dict =len(self.polyB ) # Add 0 to make lengths equal a power of 2 lowercase : int =int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product lowercase : Tuple =self.__multiply() def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(UpperCAmelCase__ ) <= 1: return dft[0] # lowercase : Any =self.c_max_length // 2 while next_ncol > 0: lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )] lowercase : Tuple =self.root**next_ncol # First half of next step lowercase : str =1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step lowercase : int =1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update lowercase : Dict =new_dft lowercase : Tuple =next_ncol // 2 return dft[0] def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.__dft('''A''' ) lowercase : Any =self.__dft('''B''' ) lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT lowercase : Optional[int] =2 while next_ncol <= self.c_max_length: lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )] lowercase : List[str] =self.root ** (next_ncol // 2) lowercase : Optional[int] =1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update lowercase : List[Any] =new_inverse_c next_ncol *= 2 # Unpack lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Any ): '''simple docstring''' lowercase : Any ='''A = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) lowercase : Tuple ='''B = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) lowercase : List[str] ='''A*B = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.product 
) ) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
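# --- Hedged usage example (added for illustration; not part of the original file) ---
# Assuming the class above is importable as FFT (its name in the algorithm collection
# this file comes from), multiplying A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x looks like:
#
#   product = FFT([1, 2, 3], [4, 5]).product
#   # expected coefficients: [4, 13, 22, 15], i.e. 4 + 13x + 22x^2 + 15x^3
#
# Plain coefficient convolution is the ground truth the FFT-based product must match:
import numpy as np

print(np.convolve([1, 2, 3], [4, 5]))  # -> [ 4 13 22 15]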
'''simple docstring''' import argparse import os import re UpperCamelCase_ = """src/transformers""" # Pattern that looks at the indentation in a line. UpperCamelCase_ = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""") def _lowerCAmelCase ( __magic_name__ : int ) -> List[Any]: lowercase : List[Any] =_re_indent.search(__magic_name__ ) return "" if search is None else search.groups()[0] def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Dict="" , __magic_name__ : Dict=None , __magic_name__ : Dict=None ) -> Union[str, Any]: lowercase : str =0 lowercase : int =code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(__magic_name__ ): index += 1 lowercase : List[Any] =['''\n'''.join(lines[:index] )] else: lowercase : int =[] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowercase : Dict =[lines[index]] index += 1 while index < len(__magic_name__ ) and (end_prompt is None or not lines[index].startswith(__magic_name__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__magic_name__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(__magic_name__ ) ) if index < len(__magic_name__ ) - 1: lowercase : Optional[int] =[lines[index + 1]] index += 1 else: lowercase : Union[str, Any] =[] else: blocks.append('''\n'''.join(__magic_name__ ) ) lowercase : Dict =[lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__magic_name__ ) > 0: blocks.append('''\n'''.join(__magic_name__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__magic_name__ ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> Any: def _inner(__magic_name__ : List[Any] ): return key(__magic_name__ ).lower().replace('''_''' , '''''' ) return _inner def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Optional[int]=None ) -> Dict: # If no key is provided, we use a noop. def noop(__magic_name__ : Optional[Any] ): return x if key is None: lowercase : Any =noop # Constants are all uppercase, they go first. lowercase : List[Any] =[obj for obj in objects if key(__magic_name__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowercase : Optional[Any] =[obj for obj in objects if key(__magic_name__ )[0].isupper() and not key(__magic_name__ ).isupper()] # Functions begin with a lowercase, they go last. lowercase : Tuple =[obj for obj in objects if not key(__magic_name__ )[0].isupper()] lowercase : List[Any] =ignore_underscore(__magic_name__ ) return sorted(__magic_name__ , key=__magic_name__ ) + sorted(__magic_name__ , key=__magic_name__ ) + sorted(__magic_name__ , key=__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Optional[int]: # This inner function sort imports between [ ]. 
def _replace(__magic_name__ : int ): lowercase : str =match.groups()[0] if "," not in imports: return f'''[{imports}]''' lowercase : List[str] =[part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowercase : Tuple =keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(__magic_name__ )] ) + "]" lowercase : List[Any] =import_statement.split('''\n''' ) if len(__magic_name__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowercase : int =2 if lines[1].strip() == '''[''' else 1 lowercase : Optional[Any] =[(i, _re_strip_line.search(__magic_name__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowercase : str =sort_objects(__magic_name__ , key=lambda __magic_name__ : x[1] ) lowercase : Tuple =[lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__magic_name__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowercase : Tuple =_re_bracket_content.sub(_replace , lines[1] ) else: lowercase : str =[part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowercase : int =keys[:-1] lowercase : Optional[int] =get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(__magic_name__ )] ) return "\n".join(__magic_name__ ) else: # Finally we have to deal with imports fitting on one line lowercase : List[Any] =_re_bracket_content.sub(_replace , __magic_name__ ) return import_statement def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Tuple=True ) -> int: with open(__magic_name__ , encoding='''utf-8''' ) as f: lowercase : List[str] =f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowercase : int =split_code_in_indented_blocks( __magic_name__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__magic_name__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowercase : str =main_blocks[block_idx] lowercase : Tuple =block.split('''\n''' ) # Get to the start of the imports. lowercase : Optional[Any] =0 while line_idx < len(__magic_name__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowercase : str =len(__magic_name__ ) else: line_idx += 1 if line_idx >= len(__magic_name__ ): continue # Ignore beginning and last line: they don't contain anything. lowercase : Any ='''\n'''.join(block_lines[line_idx:-1] ) lowercase : str =get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowercase : List[str] =split_code_in_indented_blocks(__magic_name__ , indent_level=__magic_name__ ) # We have two categories of import key: list or _import_structure[key].append/extend lowercase : Optional[Any] =_re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. 
lowercase : List[str] =[(pattern.search(__magic_name__ ).groups()[0] if pattern.search(__magic_name__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowercase : Optional[Any] =[(i, key) for i, key in enumerate(__magic_name__ ) if key is not None] lowercase : Optional[Any] =[x[0] for x in sorted(__magic_name__ , key=lambda __magic_name__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowercase : Dict =0 lowercase : Tuple =[] for i in range(len(__magic_name__ ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: lowercase : Any =sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(__magic_name__ ) count += 1 # And we put our main block back together with its first and last line. lowercase : Tuple ='''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(__magic_name__ ): if check_only: return True else: print(f'''Overwriting {file}.''' ) with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(__magic_name__ ) ) def _lowerCAmelCase ( __magic_name__ : Union[str, Any]=True ) -> int: lowercase : Dict =[] for root, _, files in os.walk(__magic_name__ ): if "__init__.py" in files: lowercase : int =sort_imports(os.path.join(__magic_name__ , '''__init__.py''' ) , check_only=__magic_name__ ) if result: lowercase : Optional[Any] =[os.path.join(__magic_name__ , '''__init__.py''' )] if len(__magic_name__ ) > 0: raise ValueError(f'''Would overwrite {len(__magic_name__ )} files, run `make style`.''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") UpperCamelCase_ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
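# --- Hedged usage note (added for illustration; not part of the original script) ---
# This utility sorts the _import_structure blocks of every __init__.py under
# src/transformers (the PATH constant defined at the top of the file). Typical
# invocations, assuming the script is saved as utils/custom_init_isort.py (the path is
# an assumption; only --check_only is defined by the argparse setup above):
#
#   python utils/custom_init_isort.py               # rewrite offending inits in place
#   python utils/custom_init_isort.py --check_only  # raise instead of rewriting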
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring''' from __future__ import annotations import queue class __SCREAMING_SNAKE_CASE : def __init__( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =data lowercase : Tuple =None lowercase : Dict =None def _lowerCAmelCase ( ) -> TreeNode: print('''\n********Press N to stop entering at any point of time********\n''' ) lowercase : Dict =input('''Enter the value of the root node: ''' ).strip().lower() lowercase : queue.Queue =queue.Queue() lowercase : str =TreeNode(int(__magic_name__ ) ) q.put(__magic_name__ ) while not q.empty(): lowercase : List[Any] =q.get() lowercase : Any =f'''Enter the left node of {node_found.data}: ''' lowercase : Dict =input(__magic_name__ ).strip().lower() or '''n''' if check == "n": return tree_node lowercase : int =TreeNode(int(__magic_name__ ) ) lowercase : List[str] =left_node q.put(__magic_name__ ) lowercase : Union[str, Any] =f'''Enter the right node of {node_found.data}: ''' lowercase : str =input(__magic_name__ ).strip().lower() or '''n''' if check == "n": return tree_node lowercase : List[Any] =TreeNode(int(__magic_name__ ) ) lowercase : Dict =right_node q.put(__magic_name__ ) raise def _lowerCAmelCase ( __magic_name__ : TreeNode ) -> None: if not isinstance(__magic_name__ , __magic_name__ ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def _lowerCAmelCase ( __magic_name__ : TreeNode ) -> None: if not isinstance(__magic_name__ , __magic_name__ ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def _lowerCAmelCase ( __magic_name__ : TreeNode ) -> None: if not isinstance(__magic_name__ , __magic_name__ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def _lowerCAmelCase ( __magic_name__ : TreeNode ) -> None: if not isinstance(__magic_name__ , __magic_name__ ) or not node: return lowercase : queue.Queue =queue.Queue() q.put(__magic_name__ ) while not q.empty(): lowercase : Optional[int] =q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _lowerCAmelCase ( __magic_name__ : TreeNode ) -> None: if not isinstance(__magic_name__ , __magic_name__ ) or not node: return lowercase : queue.Queue =queue.Queue() q.put(__magic_name__ ) while not q.empty(): lowercase : Dict =[] while not q.empty(): lowercase : Tuple =q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : TreeNode ) -> None: if not isinstance(__magic_name__ , __magic_name__ ) or not node: return lowercase : list[TreeNode] =[] lowercase : Dict =node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(__magic_name__ ) lowercase : Optional[int] =n.left # end of while means current node doesn't have left child lowercase : int =stack.pop() # start to traverse its right child lowercase : str =n.right def _lowerCAmelCase ( __magic_name__ : TreeNode ) -> None: if not isinstance(__magic_name__ , __magic_name__ ) or not node: return lowercase : list[TreeNode] =[] lowercase : List[str] =node while n or stack: while n: stack.append(__magic_name__ ) lowercase : Dict =n.left lowercase : Union[str, Any] =stack.pop() print(n.data , end=''',''' ) lowercase : Tuple =n.right 
def _lowerCAmelCase ( __magic_name__ : TreeNode ) -> None: if not isinstance(__magic_name__ , __magic_name__ ) or not node: return lowercase , lowercase : Union[str, Any] =[], [] lowercase : Union[str, Any] =node stacka.append(__magic_name__ ) while stacka: # to find the reversed order of post order, store it in stack2 lowercase : List[Any] =stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__magic_name__ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def _lowerCAmelCase ( __magic_name__ : str = "" , __magic_name__ : str=50 , __magic_name__ : Dict="*" ) -> str: if not s: return "\n" + width * char lowercase , lowercase : List[str] =divmod(width - len(__magic_name__ ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCamelCase_ = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
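# --- Hedged example (added for illustration; not part of the original file) ---
# build_tree() above is interactive (it prompts on stdin). This sketch builds a small
# tree programmatically and runs two of the traversals, assuming the placeholder
# parameter names in the file are restored so that TreeNode and the traversal
# functions behave as intended.
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.left.right = TreeNode(4), TreeNode(5)

pre_order(root)  # 1,2,4,5,3,
print()
in_order(root)   # 4,2,5,1,3,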
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'vision-encoder-decoder' lowerCamelCase_ = True def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F'''A configuraton of type {self.model_type} cannot be instantiated because ''' F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) lowercase : Optional[Any] =kwargs.pop('''encoder''' ) lowercase : List[Any] =encoder_config.pop('''model_type''' ) lowercase : List[str] =kwargs.pop('''decoder''' ) lowercase : Dict =decoder_config.pop('''model_type''' ) lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : str =True @classmethod def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowercase : int =True lowercase : Optional[Any] =True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =copy.deepcopy(self.__dict__ ) lowercase : Union[str, Any] =self.encoder.to_dict() lowercase : Union[str, Any] =self.decoder.to_dict() lowercase : int =self.__class__.model_type return output class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = version.parse('1.11' ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return 1E-4 @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =OrderedDict() lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ): '''simple docstring''' import torch lowercase : Optional[Any] =OrderedDict() lowercase : List[Any] =super().generate_dummy_inputs( UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ ) lowercase , lowercase : Optional[int] 
=dummy_input['''input_ids'''].shape lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size) lowercase : List[str] =dummy_input.pop('''input_ids''' ) lowercase : Tuple =dummy_input.pop('''attention_mask''' ) lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ ) return common_inputs class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ): '''simple docstring''' lowercase : List[Any] =encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
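# --- Hedged sketch (added for illustration; not part of the original file) ---
# Composing an encoder/decoder pair the way the from_encoder_decoder_configs
# classmethod above does, via transformers' public classes. ViT + BERT is just an
# illustrative pairing; any vision encoder / text decoder configs work the same way.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True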
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ): @register_to_config def __init__( self : Optional[int] , UpperCAmelCase__ : int = 768 , ): '''simple docstring''' super().__init__() lowercase : Union[str, Any] =nn.Parameter(torch.zeros(1 , UpperCAmelCase__ ) ) lowercase : Any =nn.Parameter(torch.ones(1 , UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Optional[Union[str, torch.device]] = None , UpperCAmelCase__ : Optional[torch.dtype] = None , ): '''simple docstring''' lowercase : Dict =nn.Parameter(self.mean.to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) ) lowercase : Union[str, Any] =nn.Parameter(self.std.to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) ) return self def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : Dict =(embeds - self.mean) * 1.0 / self.std return embeds def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : List[str] =(embeds * self.std) + self.mean return embeds
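# --- Hedged sanity check (added for illustration; not part of the original file) ---
# The two transforms above are exact inverses: normalizing an embedding and then
# de-normalizing it returns the original values. A numpy restatement of that identity,
# using the module's initial parameters (mean = 0, std = 1):
import numpy as np

mean, std = np.zeros(4), np.ones(4)
embeds = np.array([0.5, -1.0, 2.0, 0.0])
scaled = (embeds - mean) / std
restored = scaled * std + mean
assert np.allclose(restored, embeds)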
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) UpperCamelCase_ = logging.getLogger(__name__) UpperCamelCase_ = tf.data.AUTOTUNE def _lowerCAmelCase ( ) -> Any: lowercase : Dict =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' ) parser.add_argument( '''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , ) parser.add_argument( '''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , ) parser.add_argument( '''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , ) parser.add_argument( '''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , ) parser.add_argument( '''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , ) parser.add_argument( '''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , ) parser.add_argument( '''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' ) parser.add_argument( '''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , ) parser.add_argument( '''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`''' ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , ) parser.add_argument( '''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , ) parser.add_argument( '''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`''' ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , ) parser.add_argument( '''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , ) parser.add_argument( '''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , ) parser.add_argument( '''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , ) parser.add_argument( '''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py''' , ) parser.add_argument( '''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , ) parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' ) parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' ) lowercase : Union[str, Any] =parser.parse_args() return args def _lowerCAmelCase ( __magic_name__ : List[str] ) -> List[Any]: try: if args.tpu_name: lowercase : Dict =tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: lowercase : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or ''' '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' ) tf.config.experimental_connect_to_cluster(__magic_name__ ) tf.tpu.experimental.initialize_tpu_system(__magic_name__ ) return tpu def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]: lowercase : str =0 for file in file_list: lowercase : List[str] =file.split('''/''' )[-1] lowercase : Union[str, Any] =re.search(R'''-\d+-(\d+)\.tfrecord''' , __magic_name__ ).group(1 ) lowercase : int =int(__magic_name__ ) num_samples += sample_count return num_samples def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> str: lowercase : int =count_samples(__magic_name__ ) lowercase : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__magic_name__ ) if shuffle: lowercase : Union[str, Any] =dataset.shuffle(len(__magic_name__ ) ) lowercase : Any =tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here lowercase : Optional[int] =dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) ) lowercase : str =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ ) if shuffle: assert shuffle_buffer_size is not None lowercase : int =dataset.shuffle(args.shuffle_buffer_size ) lowercase : Optional[int] =dataset.batch(__magic_name__ , drop_remainder=__magic_name__ ) lowercase : int =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ ) lowercase : Union[str, Any] =dataset.prefetch(__magic_name__ ) return dataset def _lowerCAmelCase ( __magic_name__ : Any ) -> str: if not args.no_tpu: lowercase : Optional[Any] =initialize_tpu(__magic_name__ ) lowercase : Any =tf.distribute.TPUStrategy(__magic_name__ ) else: lowercase : Optional[Any] =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' ) lowercase : Any =AutoTokenizer.from_pretrained(args.tokenizer ) lowercase : Union[str, Any] =AutoConfig.from_pretrained(args.pretrained_model_config ) lowercase : Optional[Any] =tokenizer.vocab_size lowercase : str =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) ) if not training_records: raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' ) lowercase : Optional[int] =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) ) if not eval_records: 
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' ) lowercase : Any =count_samples(__magic_name__ ) lowercase : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) lowercase : Union[str, Any] =steps_per_epoch * args.num_epochs with strategy.scope(): lowercase : List[Any] =TFAutoModelForMaskedLM.from_config(__magic_name__ ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built lowercase , lowercase : Dict =create_optimizer( num_train_steps=__magic_name__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__magic_name__ , metrics=['''accuracy'''] ) def decode_fn(__magic_name__ : Optional[Any] ): lowercase : Union[str, Any] ={ '''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(__magic_name__ , __magic_name__ ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. lowercase : str =DataCollatorForLanguageModeling( tokenizer=__magic_name__ , mlm_probability=args.mlm_probability , mlm=__magic_name__ , return_tensors='''tf''' ) def mask_with_collator(__magic_name__ : Dict ): # TF really needs an isin() function lowercase : int =( ~tf.cast(batch['''attention_mask'''] , tf.bool ) | (batch['''input_ids'''] == tokenizer.cls_token_id) | (batch['''input_ids'''] == tokenizer.sep_token_id) ) lowercase , lowercase : Union[str, Any] =data_collator.tf_mask_tokens( batch['''input_ids'''] , vocab_size=len(__magic_name__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__magic_name__ , ) return batch lowercase : List[str] =args.per_replica_batch_size * strategy.num_replicas_in_sync lowercase : Dict =prepare_dataset( __magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , shuffle_buffer_size=args.shuffle_buffer_size , ) lowercase : Union[str, Any] =prepare_dataset( __magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , ) lowercase : Tuple =[] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__magic_name__ ) ) model.fit( __magic_name__ , validation_data=__magic_name__ , epochs=args.num_epochs , callbacks=__magic_name__ , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": UpperCamelCase_ = parse_args() main(args)
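# A hedged illustration of the shard-naming convention that count_samples() above relies on:
# it parses the trailing "-<shard>-<num_samples>.tfrecord" suffix of each file name and sums the
# counts, so it never has to read the records themselves. The bucket path and shard names below
# are made-up examples used only for this sketch.
import re

_FILENAME_RE = re.compile(r"-\d+-(\d+)\.tfrecord")


def _count_samples_from_names(file_list):
    # Same counting logic as the script's helper, shown stand-alone for clarity.
    return sum(int(_FILENAME_RE.search(name.split("/")[-1]).group(1)) for name in file_list)


_example_shards = [
    "gs://my-bucket/wikitext/train-00000-2048.tfrecord",
    "gs://my-bucket/wikitext/train-00001-2048.tfrecord",
    "gs://my-bucket/wikitext/train-00002-2048.tfrecord",
]
print(_count_samples_from_names(_example_shards))  # -> 6144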
'''simple docstring'''
import os
from pathlib import Path


def load_cuda_kernels():
    """Compile and load the custom MultiScaleDeformableAttention kernels used by Deformable DETR."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys

# Reduce TensorFlow's console logging before any TF-dependent import happens.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"""vocab_file""": """spiece.model"""} UpperCamelCase_ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } UpperCamelCase_ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } UpperCamelCase_ = """▁""" class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Tuple="[CLS]" , UpperCAmelCase__ : Any="[SEP]" , UpperCAmelCase__ : Union[str, Any]="<unk>" , UpperCAmelCase__ : Union[str, Any]="[SEP]" , UpperCAmelCase__ : Optional[Any]="<pad>" , UpperCAmelCase__ : str="[CLS]" , UpperCAmelCase__ : Dict="[MASK]" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ): '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
lowercase : str =( AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token ) lowercase : Optional[Any] ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) lowercase : int =do_lower_case lowercase : List[str] =remove_space lowercase : Any =keep_accents lowercase : Dict =vocab_file lowercase : int =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase__ ) @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Optional[Any] ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.__dict__.copy() lowercase : str =None return state def __setstate__( self : Tuple , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Optional[Any] =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : Union[str, Any] ={} lowercase : int =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' if self.remove_space: lowercase : Tuple =''' '''.join(inputs.strip().split() ) else: lowercase : Optional[int] =inputs lowercase : int =outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowercase : List[Any] =unicodedata.normalize('''NFKD''' , UpperCAmelCase__ ) lowercase : Optional[int] =''''''.join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] ) if self.do_lower_case: lowercase : Any =outputs.lower() return outputs def lowerCamelCase_ ( self : str , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : Optional[Any] =self.preprocess_text(UpperCAmelCase__ ) lowercase : Tuple =self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) lowercase : Dict =[] for piece in pieces: if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowercase : Tuple =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowercase : int =cur_pieces[1:] else: lowercase : Any =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCAmelCase__ ) else: new_pieces.append(UpperCAmelCase__ ) return new_pieces def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Dict ): '''simple docstring''' return self.sp_model.PieceToId(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[Any] ): '''simple docstring''' return self.sp_model.IdToPiece(UpperCAmelCase__ ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[Any] ): '''simple docstring''' lowercase : Any =[] lowercase : str ='''''' lowercase : List[str] =False for token in tokens: # make sure that special 
tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase__ ) + token lowercase : str =True lowercase : Tuple =[] else: current_sub_tokens.append(UpperCAmelCase__ ) lowercase : Optional[int] =False out_string += self.sp_model.decode(UpperCAmelCase__ ) return out_string.strip() def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : Tuple =[self.sep_token_id] lowercase : int =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is not None: return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1] return [1] + ([0] * len(UpperCAmelCase__ )) + [1] def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : Optional[Any] =[self.sep_token_id] lowercase : Union[str, Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : Union[str, Any] =os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , '''wb''' ) as fi: lowercase : Any =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,)
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = """▁""" UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCamelCase_ = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCamelCase_ = { """facebook/xglm-564M""": 2048, } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = ['input_ids', 'attention_mask'] def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ): '''simple docstring''' lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase : Optional[Any] =7 lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase__ ) ) lowercase : List[Any] =vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase : Union[str, Any] =1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase : str =len(self.sp_model ) lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(UpperCAmelCase__ ) lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ): '''simple docstring''' lowercase : Optional[int] =self.__dict__.copy() lowercase : List[Any] =None lowercase : Tuple =self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : int =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : Optional[int] ={} lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase : List[Any] =[self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase__ )) return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ )) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : int =[self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ): '''simple docstring''' return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ 
, ''' ''' ).strip() return out_string def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : Dict =os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , '''wb''' ) as fi: lowercase : Optional[int] =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,)
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # Zero-shot classification with an NLI model: find which output index corresponds to "entailment".
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        # Pair the input text with one "This example is <label>" hypothesis per candidate label.
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        # Pick the label whose hypothesis received the highest entailment logit.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _lowerCAmelCase ( __magic_name__ : str ) -> Union[str, Any]: lowercase : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase : List[str] =json.loads(open(__magic_name__ ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): lowercase : Tuple =args.output + '''.pt''' lowercase : int =OrderedDict() with tf.device('''/CPU:0''' ): lowercase : List[Any] =tf.train.load_checkpoint(args.tf_model_dir ) lowercase : int =reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase : Any =reader.get_tensor(__magic_name__ ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase : int =int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase : Union[str, Any] =8 lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/moe''' ): lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase : Union[str, Any] =key_name[-9:-7] for i in range(16 ): lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase : Any =( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/mlp''' ): lowercase : Dict =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Any =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p1/bias''' ): lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p2/kernel''' ): lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : int =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p2/bias''' ): lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase : Optional[int] =vnp.copy() # same because it is one dimensional 
lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/ln''' ): lowercase : int =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player lowercase : Optional[int] =vnp.copy() # same because it is one dimensional lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/g''' ): lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player lowercase : Any =vnp.copy() # same because it is one dimensional lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/att''' ): lowercase : int =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase : Dict =state[:, 0, :, :] lowercase : Tuple =state[:, 1, :, :] lowercase : List[Any] =state[:, 2, :, :] lowercase : Optional[int] =( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[int] =( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase : Dict =torch.tensor(__magic_name__ ) lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase : Optional[Any] =torch.tensor(__magic_name__ ) lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase : Tuple =torch.tensor(__magic_name__ ) elif key_name.endswith('''/o/kernel''' ): lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase : List[Any] =( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : str =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/an''' ): lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/g''' ): lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player lowercase : Any =vnp.copy() # same because it is one dimensional lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase : Optional[Any] ='''model.%s.weight''' % nlayer lowercase : Optional[int] =vnp.copy() # same in embedded lowercase : List[Any] =torch.tensor(__magic_name__ ) if key_name.startswith('''model/wte''' ): lowercase : Tuple ='''lm_head.weight''' lowercase : str =vnp.copy() # same in embedded lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/wob''' ): lowercase : List[str] ='''final_logits_bias''' lowercase : Dict 
=vnp.copy() # same in embedded lowercase : Tuple =state.reshape((1, -1) ) lowercase : Dict =torch.tensor(__magic_name__ ) elif key_name == "model/dense/kernel": lowercase : Dict ='''model.last_project.weight''' lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif key_name == "model/dense_1/bias": lowercase : List[Any] ='''model.last_project.bias''' lowercase : str =vnp.copy() # same because it is one dimensional lowercase : Optional[Any] =torch.tensor(__magic_name__ ) torch.save(__magic_name__ , args.output ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser( description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""") parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""") UpperCamelCase_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
'''simple docstring'''
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Print a path through ``maze`` (0 = open cell, 1 = blocked) if one exists."""
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step: try to extend the path from cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # check lower bounds
    upper_flag = (i < size) and (j < size)  # check upper bounds

    if lower_flag and upper_flag:
        # Check for already-visited and blocked cells.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # Mark the cell as visited.
            solutions[i][j] = 1

            # Explore the four directions.
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            # Backtrack.
            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
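# Quick usage sketch for the solver above (assuming solve_maze/run_maze are in scope). The 3x3
# grid is a made-up example: 0 marks an open cell and 1 a blocked one, matching the
# `not maze[i][j]` test inside run_maze.
example_maze = [
    [0, 1, 0],
    [0, 1, 0],
    [0, 0, 0],
]
solve_maze(example_maze)
# Expected printed output (1 marks visited cells on the route from (0, 0) to (2, 2)):
# [1, 0, 0]
# [1, 0, 0]
# [1, 1, 1]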
'''simple docstring''' import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = """▁""" UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BigBirdTokenizer lowerCamelCase_ = BigBirdTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = True def lowerCamelCase_ ( self : Any ): '''simple docstring''' super().setUp() lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''<s>''' lowercase : int =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(UpperCAmelCase__ ) , 1004 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase : Optional[int] =self.get_tokenizer() lowercase : Any =self.get_rust_tokenizer() lowercase : int ='''I was born in 92000, and this is falsé.''' lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ ) lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[Any] =self.get_rust_tokenizer() lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) lowercase : Tuple =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , ) lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowercase : Any 
=tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def lowerCamelCase_ ( self : str ): '''simple docstring''' return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str ='''Hello World!''' lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : int =( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase : Dict =''' '''.join(UpperCAmelCase__ ) lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Dict =self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' ) lowercase : Dict =BigBirdModel(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' # fmt: off lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 
452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
'''simple docstring''' import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase_ = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase_ = None def _lowerCAmelCase ( ) -> List[str]: lowercase : Any =argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' ) parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' ) parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' ) parser.add_argument( '''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' ) parser.add_argument( '''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' ) parser.add_argument( '''--na-prob-thresh''' , '''-t''' , type=__magic_name__ , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , ) parser.add_argument( '''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__magic_name__ , help='''Save precision-recall curves to directory.''' ) parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def _lowerCAmelCase ( __magic_name__ : Any ) -> int: lowercase : List[Any] ={} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowercase : Union[str, Any] =bool(qa['''answers''']['''text'''] ) return qid_to_has_ans def _lowerCAmelCase ( __magic_name__ : Tuple ) -> List[str]: def remove_articles(__magic_name__ : Optional[int] ): return ARTICLES_REGEX.sub(''' ''' , __magic_name__ ) def white_space_fix(__magic_name__ : List[str] ): return " ".join(text.split() ) def remove_punc(__magic_name__ : List[Any] ): lowercase : Any =set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__magic_name__ : List[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__magic_name__ ) ) ) ) def _lowerCAmelCase ( __magic_name__ : Any ) -> str: if not s: return [] return normalize_answer(__magic_name__ ).split() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Tuple: return int(normalize_answer(__magic_name__ ) == normalize_answer(__magic_name__ ) ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Optional[Any] ) -> Dict: lowercase : Union[str, Any] =get_tokens(__magic_name__ ) lowercase : Optional[Any] =get_tokens(__magic_name__ ) lowercase : str =collections.Counter(__magic_name__ ) & collections.Counter(__magic_name__ ) lowercase : Optional[int] =sum(common.values() ) if len(__magic_name__ ) == 0 or len(__magic_name__ ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 lowercase : Union[str, Any] =1.0 * num_same / len(__magic_name__ ) lowercase : List[Any] =1.0 * num_same / len(__magic_name__ ) lowercase : List[Any] =(2 * precision * recall) / (precision + recall) return fa def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : int ) -> Optional[Any]: lowercase : Any ={} lowercase : Dict ={} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowercase : Union[str, Any] =qa['''id'''] lowercase : int =[t for t in qa['''answers''']['''text'''] if normalize_answer(__magic_name__ )] if not gold_answers: # For unanswerable questions, only 
correct answer is empty string lowercase : Union[str, Any] =[''''''] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue lowercase : Optional[int] =preds[qid] # Take max over all gold answers lowercase : List[str] =max(compute_exact(__magic_name__ , __magic_name__ ) for a in gold_answers ) lowercase : Optional[int] =max(compute_fa(__magic_name__ , __magic_name__ ) for a in gold_answers ) return exact_scores, fa_scores def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Any ) -> List[Any]: lowercase : List[str] ={} for qid, s in scores.items(): lowercase : Optional[Any] =na_probs[qid] > na_prob_thresh if pred_na: lowercase : Union[str, Any] =float(not qid_to_has_ans[qid] ) else: lowercase : Union[str, Any] =s return new_scores def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None ) -> Union[str, Any]: if not qid_list: lowercase : Union[str, Any] =len(__magic_name__ ) return collections.OrderedDict( [ ('''exact''', 1_0_0.0 * sum(exact_scores.values() ) / total), ('''f1''', 1_0_0.0 * sum(fa_scores.values() ) / total), ('''total''', total), ] ) else: lowercase : Union[str, Any] =len(__magic_name__ ) return collections.OrderedDict( [ ('''exact''', 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total), ('''f1''', 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total), ('''total''', total), ] ) def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : int ) -> Optional[Any]: for k in new_eval: lowercase : Optional[int] =new_eval[k] def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Union[str, Any]: plt.step(__magic_name__ , __magic_name__ , color='''b''' , alpha=0.2 , where='''post''' ) plt.fill_between(__magic_name__ , __magic_name__ , step='''post''' , alpha=0.2 , color='''b''' ) plt.xlabel('''Recall''' ) plt.ylabel('''Precision''' ) plt.xlim([0.0, 1.0_5] ) plt.ylim([0.0, 1.0_5] ) plt.title(__magic_name__ ) plt.savefig(__magic_name__ ) plt.clf() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=None ) -> Optional[Any]: lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : na_probs[k] ) lowercase : int =0.0 lowercase : int =1.0 lowercase : Optional[int] =0.0 lowercase : Tuple =[1.0] lowercase : Union[str, Any] =[0.0] lowercase : Any =0.0 for i, qid in enumerate(__magic_name__ ): if qid_to_has_ans[qid]: true_pos += scores[qid] lowercase : List[str] =true_pos / float(i + 1 ) lowercase : List[str] =true_pos / float(__magic_name__ ) if i == len(__magic_name__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__magic_name__ ) recalls.append(__magic_name__ ) if out_image: plot_pr_curve(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) return {"ap": 1_0_0.0 * avg_prec} def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[Any] ) -> List[str]: if out_image_dir and not os.path.exists(__magic_name__ ): os.makedirs(__magic_name__ ) lowercase : Optional[Any] =sum(1 for v in qid_to_has_ans.values() if v ) 
if num_true_pos == 0: return lowercase : List[str] =make_precision_recall_eval( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , out_image=os.path.join(__magic_name__ , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , ) lowercase : List[Any] =make_precision_recall_eval( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , out_image=os.path.join(__magic_name__ , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , ) lowercase : Optional[Any] ={k: float(__magic_name__ ) for k, v in qid_to_has_ans.items()} lowercase : List[str] =make_precision_recall_eval( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , out_image=os.path.join(__magic_name__ , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , ) merge_eval(__magic_name__ , __magic_name__ , '''pr_exact''' ) merge_eval(__magic_name__ , __magic_name__ , '''pr_f1''' ) merge_eval(__magic_name__ , __magic_name__ , '''pr_oracle''' ) def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] ) -> Optional[int]: if not qid_list: return lowercase : str =[na_probs[k] for k in qid_list] lowercase : int =np.ones_like(__magic_name__ ) / float(len(__magic_name__ ) ) plt.hist(__magic_name__ , weights=__magic_name__ , bins=20 , range=(0.0, 1.0) ) plt.xlabel('''Model probability of no-answer''' ) plt.ylabel('''Proportion of dataset''' ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__magic_name__ , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> Any: lowercase : List[str] =sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) lowercase : Any =num_no_ans lowercase : Union[str, Any] =cur_score lowercase : Tuple =0.0 lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : na_probs[k] ) for i, qid in enumerate(__magic_name__ ): if qid not in scores: continue if qid_to_has_ans[qid]: lowercase : Tuple =scores[qid] else: if preds[qid]: lowercase : Any =-1 else: lowercase : Tuple =0 cur_score += diff if cur_score > best_score: lowercase : Tuple =cur_score lowercase : Any =na_probs[qid] return 1_0_0.0 * best_score / len(__magic_name__ ), best_thresh def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Any ) -> Dict: lowercase , lowercase : Union[str, Any] =find_best_thresh(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) lowercase , lowercase : Union[str, Any] =find_best_thresh(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) lowercase : Optional[Any] =best_exact lowercase : Optional[Any] =exact_thresh lowercase : str =best_fa lowercase : Tuple =fa_thresh def _lowerCAmelCase ( ) -> List[Any]: with open(OPTS.data_file ) as f: lowercase : Tuple =json.load(__magic_name__ ) lowercase : List[Any] =dataset_json['''data'''] with open(OPTS.pred_file ) as f: lowercase : Optional[int] =json.load(__magic_name__ ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: lowercase : Union[str, Any] =json.load(__magic_name__ ) else: lowercase : Optional[int] ={k: 0.0 for k in preds} lowercase : List[str] =make_qid_to_has_ans(__magic_name__ ) # maps qid to True/False lowercase : Optional[Any] =[k for k, v in 
qid_to_has_ans.items() if v] lowercase : List[Any] =[k for k, v in qid_to_has_ans.items() if not v] lowercase , lowercase : int =get_raw_scores(__magic_name__ , __magic_name__ ) lowercase : Optional[Any] =apply_no_ans_threshold(__magic_name__ , __magic_name__ , __magic_name__ , OPTS.na_prob_thresh ) lowercase : Dict =apply_no_ans_threshold(__magic_name__ , __magic_name__ , __magic_name__ , OPTS.na_prob_thresh ) lowercase : List[Any] =make_eval_dict(__magic_name__ , __magic_name__ ) if has_ans_qids: lowercase : Optional[Any] =make_eval_dict(__magic_name__ , __magic_name__ , qid_list=__magic_name__ ) merge_eval(__magic_name__ , __magic_name__ , '''HasAns''' ) if no_ans_qids: lowercase : int =make_eval_dict(__magic_name__ , __magic_name__ , qid_list=__magic_name__ ) merge_eval(__magic_name__ , __magic_name__ , '''NoAns''' ) if OPTS.na_prob_file: find_all_best_thresh(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , OPTS.out_image_dir ) histogram_na_prob(__magic_name__ , __magic_name__ , OPTS.out_image_dir , '''hasAns''' ) histogram_na_prob(__magic_name__ , __magic_name__ , OPTS.out_image_dir , '''noAns''' ) if OPTS.out_file: with open(OPTS.out_file , '''w''' ) as f: json.dump(__magic_name__ , __magic_name__ ) else: print(json.dumps(__magic_name__ , indent=2 ) ) if __name__ == "__main__": UpperCamelCase_ = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
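# A small worked example of the token-level F1 used above (the strings are made up; the numbers
# mirror what get_tokens() and the precision/recall formula in the script would produce).
gold = "the Eiffel Tower"          # gold answer for one hypothetical question
pred = "Eiffel Tower in Paris"     # model prediction

# normalize_answer() lowercases, strips punctuation and drops articles, so:
gold_toks = ["eiffel", "tower"]
pred_toks = ["eiffel", "tower", "in", "paris"]

num_same = 2                                  # overlap of the two token multisets
precision = num_same / len(pred_toks)         # 0.5
recall = num_same / len(gold_toks)            # 1.0
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 3))  # -> 0.667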
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for _ in range(r + 1)]
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # Compute the current row of Pascal's triangle from the previous one, in place.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
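# Hedged sanity check for the row-wise Pascal's triangle recurrence above; the cross-check
# against math.comb is purely illustrative.
import math

for n, r in [(10, 5), (20, 3), (7, 0)]:
    assert binomial_coefficient(n=n, r=r) == math.comb(n, r)
print("all checks passed")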
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"""vocab_file""": """spiece.model"""} UpperCamelCase_ = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", } } UpperCamelCase_ = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } # Segments (not really needed) UpperCamelCase_ = 0 UpperCamelCase_ = 1 UpperCamelCase_ = 2 UpperCamelCase_ = 3 UpperCamelCase_ = 4 class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = 'left' def __init__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : List[str]="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Optional[int]="<unk>" , UpperCAmelCase__ : Tuple="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Optional[int]="<cls>" , UpperCAmelCase__ : Dict="<mask>" , UpperCAmelCase__ : Union[str, Any]=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Dict , ): '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it lowercase : Dict =AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token lowercase : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) lowercase : Optional[Any] =3 lowercase : int =do_lower_case lowercase : Tuple =remove_space lowercase : List[Any] =keep_accents lowercase : Tuple =vocab_file lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase__ ) @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : List[Any] ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ): '''simple docstring''' lowercase : int =self.__dict__.copy() lowercase : List[Any] =None return state def __setstate__( self : Any , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : int =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : Dict ={} lowercase : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' if self.remove_space: lowercase : 
str =''' '''.join(inputs.strip().split() ) else: lowercase : int =inputs lowercase : List[Any] =outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowercase : Any =unicodedata.normalize('''NFKD''' , UpperCAmelCase__ ) lowercase : int =''''''.join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] ) if self.do_lower_case: lowercase : int =outputs.lower() return outputs def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : List[str] =self.preprocess_text(UpperCAmelCase__ ) lowercase : Dict =self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) lowercase : List[str] =[] for piece in pieces: if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowercase : Optional[Any] =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowercase : List[Any] =cur_pieces[1:] else: lowercase : List[Any] =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCAmelCase__ ) else: new_pieces.append(UpperCAmelCase__ ) return new_pieces def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' return self.sp_model.PieceToId(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Dict ): '''simple docstring''' return self.sp_model.IdToPiece(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : str =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , ''' ''' ).strip() return out_string def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Union[str, Any] , ): '''simple docstring''' lowercase : Optional[int] =kwargs.pop('''use_source_tokenizer''' , UpperCAmelCase__ ) lowercase : Any =self.convert_ids_to_tokens(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 lowercase : Dict =[] lowercase : Optional[Any] =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) ) lowercase : List[Any] =[] sub_texts.append(UpperCAmelCase__ ) else: current_sub_text.append(UpperCAmelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowercase : Any =''''''.join(UpperCAmelCase__ ) lowercase : str =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowercase : str =self.clean_up_tokenization(UpperCAmelCase__ ) return clean_text else: return text def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : Dict =[self.sep_token_id] lowercase : Union[str, Any] =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] return ([0] * len(UpperCAmelCase__ )) + [1, 1] def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : Optional[Any] =[self.sep_token_id] lowercase : Tuple =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : Optional[Any] =os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , '''wb''' ) as fi: lowercase : Optional[int] =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,)
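# Hedged usage sketch (not part of the original module): the tokenizer defined above
# mirrors transformers' XLNetTokenizer (see the xlnet-base-cased vocab map), so a
# typical round-trip looks like the following; the checkpoint name and the sample
# sentence are illustrative.
from transformers import XLNetTokenizer

xlnet_tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
pieces = xlnet_tokenizer.tokenize("Hello, world!")      # SentencePiece pieces
encoded = xlnet_tokenizer("Hello, world!")              # input_ids, token_type_ids, attention_mask
decoded = xlnet_tokenizer.decode(encoded["input_ids"])  # text plus trailing <sep> <cls> special tokens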
'''simple docstring'''
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character, increment the count for one string and decrement it for
    # the other; anagrams cancel out to zero for every character.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the shortest repunit divisible by divisor (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least n for which the shortest divisible repunit has more than `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = None lowerCamelCase_ = BloomTokenizerFast lowerCamelCase_ = BloomTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 'tokenizer_file' lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' super().setUp() lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : str =self.get_rust_tokenizer() lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids'''] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input lowercase : Tuple ='''This is a simple input''' lowercase : int =['''This is a simple input 1''', '''This is a simple input 2'''] lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''') lowercase : int =[ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) lowercase : Optional[int] =None # Hotfixing padding = None self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises( UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) # Pair input self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises(UpperCAmelCase__ , 
tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises( UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_rust_tokenizer() lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ ) lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data lowercase : int =list(sample_data.values() ) lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) ) lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
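# Hedged usage sketch (illustrative, not part of the test file; requires downloading the
# released BLOOM tokenizer): this is the round-trip pinned down by the encode/decode test
# above, with the expected ids copied from its assertions.
from transformers import BloomTokenizerFast

bloom_tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
batch = bloom_tokenizer.batch_encode_plus(["The quick brown fox</s>", "jumps over the lazy dog</s>"])
print(batch["input_ids"])  # [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
print(bloom_tokenizer.batch_decode(batch["input_ids"]))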
'''simple docstring''' from __future__ import annotations UpperCamelCase_ = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]: lowercase : Any =[ [0 for col in range(len(grid[0] ) )] for row in range(len(__magic_name__ ) ) ] # the reference grid lowercase : Union[str, Any] =1 lowercase : List[Any] =[ [0 for col in range(len(grid[0] ) )] for row in range(len(__magic_name__ ) ) ] # the action grid lowercase : Optional[Any] =init[0] lowercase : Optional[int] =init[1] lowercase : str =0 lowercase : Dict =g + heuristic[x][y] # cost from starting cell to destination cell lowercase : str =[[f, g, x, y]] lowercase : Any =False # flag that is set when search is complete lowercase : Tuple =False # flag set if we can't find expand while not found and not resign: if len(__magic_name__ ) == 0: raise ValueError('''Algorithm is unable to find solution''' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() lowercase : Tuple =cell.pop() lowercase : str =next_cell[2] lowercase : Optional[int] =next_cell[3] lowercase : List[Any] =next_cell[1] if x == goal[0] and y == goal[1]: lowercase : Optional[Any] =True else: for i in range(len(__magic_name__ ) ): # to try out different valid actions lowercase : Optional[int] =x + DIRECTIONS[i][0] lowercase : Union[str, Any] =y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__magic_name__ ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: lowercase : Dict =g + cost lowercase : Optional[Any] =ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) lowercase : List[str] =1 lowercase : Tuple =i lowercase : Optional[int] =[] lowercase : Optional[int] =goal[0] lowercase : Any =goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: lowercase : Dict =x - DIRECTIONS[action[x][y]][0] lowercase : str =y - DIRECTIONS[action[x][y]][1] lowercase : str =xa lowercase : Union[str, Any] =ya invpath.append([x, y] ) lowercase : List[str] =[] for i in range(len(__magic_name__ ) ): path.append(invpath[len(__magic_name__ ) - 1 - i] ) return path, action if __name__ == "__main__": UpperCamelCase_ = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] UpperCamelCase_ = [0, 0] # all coordinates are given in format [y,x] UpperCamelCase_ = [len(grid) - 1, len(grid[0]) - 1] UpperCamelCase_ = 1 # the cost map which pushes the path closer to the goal UpperCamelCase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): UpperCamelCase_ = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map UpperCamelCase_ = 99 UpperCamelCase_ , UpperCamelCase_ = search(grid, init, goal, cost, heuristic) print("""ACTION MAP""") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
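# Hedged readable sketch (not part of the original file): the search above is an A*-style
# expansion over a 0/1 grid using a Manhattan-distance heuristic and the same four moves as
# DIRECTIONS. The names below are illustrative; the original uses placeholder identifiers,
# and this sketch returns only the path, not the action map.
import heapq


def astar_sketch(grid, start, goal):
    """Return a list of cells from start to goal (empty list if unreachable)."""
    rows, cols = len(grid), len(grid[0])

    def heuristic(cell):
        return abs(cell[0] - goal[0]) + abs(cell[1] - goal[1])

    open_heap = [(heuristic(start), 0, start, [start])]
    seen = {start}
    while open_heap:
        _f, g, (x, y), path = heapq.heappop(open_heap)
        if (x, y) == goal:
            return path
        for dx, dy in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 0 and (nx, ny) not in seen:
                seen.add((nx, ny))
                heapq.heappush(open_heap, (g + 1 + heuristic((nx, ny)), g + 1, (nx, ny), path + [(nx, ny)]))
    return []


if __name__ == "__main__":
    demo_grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    print(astar_sketch(demo_grid, (0, 0), (len(demo_grid) - 1, len(demo_grid[0]) - 1)))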
'''simple docstring'''
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power in watts: P = S * pf."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power in VAR: Q = S * sqrt(1 - pf**2)."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
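# Hedged worked example for the helpers above (descriptive names real_power / reactive_power
# as used in this cleaned-up module): for 100 VA of apparent power at a power factor of 0.9,
#   P = 100 * 0.9              = 90 W
#   Q = 100 * sqrt(1 - 0.9**2) ≈ 43.59 VAR
assert round(real_power(100, 0.9), 2) == 90.0
assert round(reactive_power(100, 0.9), 2) == 43.59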
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""", # See all Dinat models at https://huggingface.co/models?filter=dinat } class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ): lowerCamelCase_ = 'dinat' lowerCamelCase_ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : str , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Union[str, Any]=64 , UpperCAmelCase__ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase__ : Dict=[2, 4, 8, 16] , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : int=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase__ : Union[str, Any]=3.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Dict=1E-5 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : List[str] , ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) lowercase : Optional[int] =patch_size lowercase : Dict =num_channels lowercase : Dict =embed_dim lowercase : Any =depths lowercase : str =len(UpperCAmelCase__ ) lowercase : str =num_heads lowercase : str =kernel_size lowercase : str =dilations lowercase : int =mlp_ratio lowercase : int =qkv_bias lowercase : Optional[Any] =hidden_dropout_prob lowercase : Tuple =attention_probs_dropout_prob lowercase : List[str] =drop_path_rate lowercase : Dict =hidden_act lowercase : str =layer_norm_eps lowercase : Dict =initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowercase : Union[str, Any] =int(embed_dim * 2 ** (len(UpperCAmelCase__ ) - 1) ) lowercase : List[str] =layer_scale_init_value lowercase : Tuple =['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase__ ) + 1 )] lowercase , lowercase : str =get_aligned_output_features_output_indices( out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ): '''simple docstring''' warnings.warn( '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
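# Hedged migration sketch (checkpoint name is illustrative): the deprecation warning above
# points at CLIPImageProcessor, which is a drop-in replacement for this class.
from transformers import CLIPImageProcessor

clip_image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# pixel_values = clip_image_processor(images=pil_image, return_tensors="pt").pixel_values  # pil_image: a PIL.Image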
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = LEDTokenizer lowerCamelCase_ = LEDTokenizerFast lowerCamelCase_ = True def lowerCamelCase_ ( self : int ): '''simple docstring''' super().setUp() lowercase : Optional[int] =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowercase : Tuple =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : Optional[int] =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowercase : Union[str, Any] ={'''unk_token''': '''<unk>'''} lowercase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : List[Any] , **UpperCAmelCase__ : Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] , **UpperCAmelCase__ : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : int ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Union[str, Any] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowercase : Optional[int] =[0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase : Dict =tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowercase : List[str] =batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @require_torch def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase : Any =tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , 
return_tensors='''pt''' ) self.assertIn('''input_ids''' , UpperCAmelCase__ ) self.assertIn('''attention_mask''' , UpperCAmelCase__ ) self.assertNotIn('''labels''' , UpperCAmelCase__ ) self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase__ ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =[ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase : str =tokenizer(text_target=UpperCAmelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase : Tuple =tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] =['''A long paragraph for summarization.'''] lowercase : Any =[ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase : List[Any] =tokenizer(UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : Optional[Any] =tokenizer(text_target=UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : Any =inputs['''input_ids'''] lowercase : List[Any] =targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase : Any =['''Summary of the text.''', '''Another summary.'''] lowercase : Optional[int] =[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowercase : List[Any] =tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ ) lowercase : Any =[[0] * len(UpperCAmelCase__ ) for x in encoded_output['''input_ids''']] lowercase : Tuple =tokenizer.pad(UpperCAmelCase__ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' pass def lowerCamelCase_ ( self : int ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase : List[Any] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : str =self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Optional[Any] ='''A, <mask> AllenNLP sentence.''' lowercase : List[Any] =tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) lowercase : Optional[Any] =tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / 
len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) lowercase : Optional[int] =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) lowercase : str =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
'''simple docstring''' import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") UpperCamelCase_ = parser.parse_args() if args.model_type == "roberta": UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name) UpperCamelCase_ = """roberta""" elif args.model_type == "gpt2": UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name) UpperCamelCase_ = """transformer""" UpperCamelCase_ = model.state_dict() UpperCamelCase_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight''' UpperCamelCase_ = state_dict[param_name] for w in ["weight", "bias"]: UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}''' UpperCamelCase_ = state_dict[param_name] # Transformer Blocks # UpperCamelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: UpperCamelCase_ = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}'''] UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}'''] UpperCamelCase_ = state_dict["""lm_head.weight"""] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be abbreviated to `b`.

    An abbreviation capitalizes zero or more lowercase letters of `a` and deletes the
    remaining lowercase letters; dp[i][j] means a[:i] can be turned into b[:j].
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict: for param in module.parameters(): lowercase : List[str] =False def _lowerCAmelCase ( ) -> List[str]: lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase : Optional[int] ='''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str: lowercase : Optional[int] =plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def _lowerCAmelCase ( ) -> List[Any]: lowercase : Any =datetime.now() lowercase : Dict =current_time.strftime('''%H:%M:%S''' ) return timestamp
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 42 class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ): @register_to_config def __init__( self : Union[str, Any] , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCAmelCase__ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCAmelCase__ : Tuple[int] = (64,) , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : str = "silu" , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 256 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : float = 0.1_82_15 , UpperCAmelCase__ : str = "group" , ): '''simple docstring''' super().__init__() # pass init params to Encoder lowercase : str =Encoder( in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , down_block_types=UpperCAmelCase__ , block_out_channels=UpperCAmelCase__ , layers_per_block=UpperCAmelCase__ , act_fn=UpperCAmelCase__ , norm_num_groups=UpperCAmelCase__ , double_z=UpperCAmelCase__ , ) lowercase : List[str] =vq_embed_dim if vq_embed_dim is not None else latent_channels lowercase : Any =nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , 1 ) lowercase : List[str] =VectorQuantizer(UpperCAmelCase__ , UpperCAmelCase__ , beta=0.25 , remap=UpperCAmelCase__ , sane_index_shape=UpperCAmelCase__ ) lowercase : Optional[int] =nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , 1 ) # pass init params to Decoder lowercase : str =Decoder( in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , up_block_types=UpperCAmelCase__ , block_out_channels=UpperCAmelCase__ , layers_per_block=UpperCAmelCase__ , act_fn=UpperCAmelCase__ , norm_num_groups=UpperCAmelCase__ , norm_type=UpperCAmelCase__ , ) @apply_forward_hook def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : bool = True ): '''simple docstring''' lowercase : Any =self.encoder(UpperCAmelCase__ ) lowercase : Tuple =self.quant_conv(UpperCAmelCase__ ) if not return_dict: return (h,) return VQEncoderOutput(latents=UpperCAmelCase__ ) @apply_forward_hook def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ): '''simple docstring''' # also go through quantization layer if not force_not_quantize: lowercase , lowercase , lowercase : Union[str, Any] =self.quantize(UpperCAmelCase__ ) else: lowercase : List[str] =h lowercase : Union[str, Any] =self.post_quant_conv(UpperCAmelCase__ ) lowercase : int =self.decoder(UpperCAmelCase__ , quant if self.config.norm_type == '''spatial''' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : bool = True ): '''simple docstring''' lowercase : List[str] =sample lowercase : Optional[Any] =self.encode(UpperCAmelCase__ ).latents lowercase : Tuple =self.decode(UpperCAmelCase__ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase__ )
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _lowerCAmelCase ( ) -> List[Any]: lowercase : Tuple =HfArgumentParser(__magic_name__ ) lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0] lowercase : Any =TensorFlowBenchmark(args=__magic_name__ ) try: lowercase : List[Any] =parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.''' lowercase : Any =''' '''.join(str(__magic_name__ ).split(''' ''' )[:-1] ) lowercase : Optional[Any] ='''''' lowercase : List[str] =eval(str(__magic_name__ ).split(''' ''' )[-1] ) lowercase : Optional[Any] =[] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__magic_name__ ) if len(__magic_name__ ) > 0: lowercase : int =full_error_msg + begin_error_msg + str(__magic_name__ ) raise ValueError(__magic_name__ ) benchmark.run() if __name__ == "__main__": main()
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Tuple , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool: lowercase : str =len(__magic_name__ ) # We need to create solution object to save path. lowercase : int =[[0 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )] lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , __magic_name__ ) if solved: print('''\n'''.join(str(__magic_name__ ) for row in solutions ) ) else: print('''No solution exists!''' ) return solved def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool: lowercase : Optional[int] =len(__magic_name__ ) # Final check point. if i == j == (size - 1): lowercase : Optional[int] =1 return True lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds lowercase : Tuple =(i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited lowercase : Union[str, Any] =1 # check for directions if ( run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ ) or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ ) or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ ) or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ ) ): return True lowercase : str =0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
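# Hedged readable sketch (not part of the original file): the solver above is a depth-first
# backtracking "rat in a maze" search. The descriptive names below are illustrative; the
# original uses placeholder identifiers.
def solve_maze_sketch(maze):
    """Return a 0/1 grid marking a path from (0, 0) to the bottom-right corner, or None."""
    size = len(maze)
    solution = [[0] * size for _ in range(size)]

    def walk(i, j):
        if i == j == size - 1:  # reached the goal cell
            solution[i][j] = 1
            return True
        in_bounds = 0 <= i < size and 0 <= j < size
        if in_bounds and not solution[i][j] and not maze[i][j]:
            solution[i][j] = 1  # tentatively mark the cell as part of the path
            if walk(i + 1, j) or walk(i, j + 1) or walk(i - 1, j) or walk(i, j - 1):
                return True
            solution[i][j] = 0  # backtrack
        return False

    return solution if walk(0, 0) else None


if __name__ == "__main__":
    demo = [[0, 1, 0], [0, 0, 0], [1, 1, 0]]
    print(solve_maze_sketch(demo))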
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar UpperCamelCase_ = TypeVar("""T""") UpperCamelCase_ = TypeVar("""U""") class __SCREAMING_SNAKE_CASE ( Generic[T, U] ): def __init__( self : List[Any] , UpperCAmelCase__ : T | None , UpperCAmelCase__ : U | None ): '''simple docstring''' lowercase : Optional[Any] =key lowercase : Optional[int] =val lowercase : DoubleLinkedListNode[T, U] | None =None lowercase : DoubleLinkedListNode[T, U] | None =None def __repr__( self : str ): '''simple docstring''' return ( F'''Node: key: {self.key}, val: {self.val}, ''' F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class __SCREAMING_SNAKE_CASE ( Generic[T, U] ): def __init__( self : Optional[Any] ): '''simple docstring''' lowercase : DoubleLinkedListNode[T, U] =DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : DoubleLinkedListNode[T, U] =DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase , lowercase : str =self.rear, self.head def __repr__( self : Dict ): '''simple docstring''' lowercase : Union[str, Any] =['''DoubleLinkedList'''] lowercase : List[Any] =self.head while node.next is not None: rep.append(str(UpperCAmelCase__ ) ) lowercase : List[str] =node.next rep.append(str(self.rear ) ) return ",\n ".join(UpperCAmelCase__ ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : DoubleLinkedListNode[T, U] ): '''simple docstring''' lowercase : List[Any] =self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None lowercase : List[Any] =node lowercase : Optional[int] =previous lowercase : Optional[Any] =node lowercase : Union[str, Any] =self.rear def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : DoubleLinkedListNode[T, U] ): '''simple docstring''' if node.prev is None or node.next is None: return None lowercase : Tuple =node.next lowercase : Tuple =node.prev lowercase : int =None lowercase : List[Any] =None return node class __SCREAMING_SNAKE_CASE ( Generic[T, U] ): lowerCamelCase_ = {} def __init__( self : Optional[Any] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : DoubleLinkedList[T, U] =DoubleLinkedList() lowercase : Any =capacity lowercase : List[Any] =0 lowercase : str =0 lowercase : Any =0 lowercase : dict[T, DoubleLinkedListNode[T, U]] ={} def __repr__( self : Tuple ): '''simple docstring''' return ( F'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' F'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self : List[str] , UpperCAmelCase__ : T ): '''simple docstring''' return key in self.cache def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : T ): '''simple docstring''' # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 lowercase : DoubleLinkedListNode[T, U] =self.cache[key] lowercase : int =self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(UpperCAmelCase__ ) return node.val self.miss += 1 return None def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : T , UpperCAmelCase__ : U ): '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity lowercase : Optional[int] =self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is 
not None assert first_node.key is not None assert ( self.list.remove(UpperCAmelCase__ ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 lowercase : Optional[Any] =DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value lowercase : Union[str, Any] =self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list lowercase : str =value self.list.add(UpperCAmelCase__ ) @classmethod def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : int = 128 ): '''simple docstring''' def cache_decorator_inner(UpperCAmelCase__ : Callable[[T], U] ) -> Callable[..., U]: def cache_decorator_wrapper(*UpperCAmelCase__ : T ) -> U: if func not in cls.decorator_function_to_instance_map: lowercase : int =LRUCache(UpperCAmelCase__ ) lowercase : str =cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: lowercase : str =func(*UpperCAmelCase__ ) cls.decorator_function_to_instance_map[func].put(args[0] , UpperCAmelCase__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(UpperCAmelCase__ , '''cache_info''' , UpperCAmelCase__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
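# Hedged readable sketch (illustrative, not part of the original file): the class above
# implements an LRU cache on top of a hand-rolled doubly linked list plus a memoizing
# decorator. The same get/put semantics can be sketched with collections.OrderedDict:
from collections import OrderedDict


class LRUCacheSketch:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def get(self, key):
        if key not in self.store:
            return None
        self.store.move_to_end(key)  # mark as most recently used
        return self.store[key]

    def put(self, key, value) -> None:
        if key in self.store:
            self.store.move_to_end(key)
        self.store[key] = value
        if len(self.store) > self.capacity:
            self.store.popitem(last=False)  # evict the least recently used entry


if __name__ == "__main__":
    cache = LRUCacheSketch(2)
    cache.put(1, 1)
    cache.put(2, 2)
    cache.get(1)     # key 1 becomes most recently used
    cache.put(3, 3)  # evicts key 2
    assert cache.get(2) is None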
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowercase : Any =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) @torch.no_grad() def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ): '''simple docstring''' # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ): lowercase : Optional[int] =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCAmelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowercase : Dict =self.scheduler.step( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 ) lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase__ )
'''simple docstring''' import random def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Optional[Any]: lowercase : List[Any] =a[left_index] lowercase : Any =left_index + 1 for j in range(left_index + 1 , __magic_name__ ): if a[j] < pivot: lowercase , lowercase : Union[str, Any] =a[i], a[j] i += 1 lowercase , lowercase : Any =a[i - 1], a[left_index] return i - 1 def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Dict ) -> Dict: if left < right: lowercase : str =random.randint(__magic_name__ , right - 1 ) lowercase , lowercase : List[str] =( a[left], a[pivot], ) # switches the pivot with the left most bound lowercase : Tuple =partition(__magic_name__ , __magic_name__ , __magic_name__ ) quick_sort_random( __magic_name__ , __magic_name__ , __magic_name__ ) # recursive quicksort to the left of the pivot point quick_sort_random( __magic_name__ , pivot_index + 1 , __magic_name__ ) # recursive quicksort to the right of the pivot point def _lowerCAmelCase ( ) -> List[Any]: lowercase : Union[str, Any] =input('''Enter numbers separated by a comma:\n''' ).strip() lowercase : Tuple =[int(__magic_name__ ) for item in user_input.split(''',''' )] quick_sort_random(__magic_name__ , 0 , len(__magic_name__ ) ) print(__magic_name__ ) if __name__ == "__main__": main()
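# Hedged readable sketch (illustrative names, not part of the original file): the module above
# is an in-place quicksort that swaps a random pivot to the front before a Lomuto-style partition.
import random


def quicksort_random_sketch(items, lo, hi):
    """Sort items[lo:hi] in place."""
    if lo < hi:
        pivot_index = random.randint(lo, hi - 1)
        items[lo], items[pivot_index] = items[pivot_index], items[lo]
        split = partition_sketch(items, lo, hi)
        quicksort_random_sketch(items, lo, split)
        quicksort_random_sketch(items, split + 1, hi)


def partition_sketch(items, lo, hi):
    """Partition items[lo:hi] around items[lo] and return the pivot's final index."""
    pivot = items[lo]
    i = lo + 1
    for j in range(lo + 1, hi):
        if items[j] < pivot:
            items[i], items[j] = items[j], items[i]
            i += 1
    items[i - 1], items[lo] = items[lo], items[i - 1]
    return i - 1


if __name__ == "__main__":
    data = [5, 1, 4, 2, 3]
    quicksort_random_sketch(data, 0, len(data))
    assert data == [1, 2, 3, 4, 5]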
'''simple docstring''' import argparse import copy def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]: lowercase : int ={} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: lowercase : List[str] =[] _list.append([line.split()[1], line.split()[2]] ) lowercase : Tuple =_list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: lowercase : List[Any] =[] _list.append([line.split()[0], line.split()[2]] ) lowercase : Union[str, Any] =_list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str: with open(__magic_name__ ) as f: lowercase : Optional[int] =f.read(1 ) lowercase : List[Any] =start_node lowercase : List[Any] =[] lowercase : str =start_node lowercase : str =0 while visiting not in first_solution: lowercase : Optional[int] =10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: lowercase : List[Any] =k[1] lowercase : str =k[0] first_solution.append(__magic_name__ ) lowercase : Any =distance_of_first_solution + int(__magic_name__ ) lowercase : Optional[int] =best_node first_solution.append(__magic_name__ ) lowercase : str =0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 lowercase : str =( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple: lowercase : Tuple =[] for n in solution[1:-1]: lowercase : Dict =solution.index(__magic_name__ ) for kn in solution[1:-1]: lowercase : Tuple =solution.index(__magic_name__ ) if n == kn: continue lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ ) lowercase : Optional[int] =kn lowercase : List[Any] =n lowercase : List[Any] =0 for k in _tmp[:-1]: lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: lowercase : Optional[int] =distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]: lowercase : str =1 lowercase : List[Any] =first_solution lowercase : Any =[] lowercase : str =distance_of_first_solution lowercase : str =solution while count <= iters: lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ ) lowercase : Dict =0 lowercase : int =neighborhood[index_of_best_solution] lowercase : Optional[int] =len(__magic_name__ ) - 1 lowercase : List[Any] =False while not found: lowercase : List[Any] =0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: lowercase : List[str] =best_solution[i] lowercase : Dict =solution[i] break lowercase : Any =i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) lowercase : str =True 
lowercase : int =best_solution[:-1] lowercase : Any =neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: lowercase : Optional[int] =cost lowercase : str =solution else: lowercase : Optional[int] =index_of_best_solution + 1 lowercase : List[Any] =neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) lowercase : Optional[int] =count + 1 return best_solution_ever, best_cost def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple: lowercase : List[str] =generate_neighbours(args.File ) lowercase , lowercase : Optional[Any] =generate_first_solution( args.File , __magic_name__ ) lowercase , lowercase : int =tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
'''simple docstring''' from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _lowerCAmelCase ( __magic_name__ : str = "isbn/0140328726" ) -> dict: lowercase : Optional[int] =olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: lowercase : List[str] =f'''{olid} is not a valid Open Library olid''' raise ValueError(__magic_name__ ) return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json() def _lowerCAmelCase ( __magic_name__ : dict ) -> dict: lowercase : Tuple ={ '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } lowercase : List[str] ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()} lowercase : Dict =[ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] lowercase : str =data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(__magic_name__ , __magic_name__ ): lowercase : Optional[int] =''', '''.join(__magic_name__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: UpperCamelCase_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(f'''\nSearching Open Library for ISBN: {isbn}...\n''') try: UpperCamelCase_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}''')) print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'''Sorry, there are no results for ISBN: {isbn}.''')
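# Hedged illustration (descriptive names are stand-ins for the placeholder identifiers above):
# the first helper normalizes an Open Library id and rejects anything that is not of the form
# "<type>/<id>" before issuing the HTTP request.
def validate_olid_sketch(olid: str) -> str:
    cleaned = olid.strip().strip("/")
    if cleaned.count("/") != 1:
        raise ValueError(f"{olid} is not a valid Open Library olid")
    return cleaned


assert validate_olid_sketch(" isbn/0140328726/ ") == "isbn/0140328726"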
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """Count the reduced proper fractions n/d with d <= limit, i.e. the sum of phi(d) for 2 <= d <= limit."""
    # Sieve of Eratosthenes over the odd numbers, with 2 added explicitly.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
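# Hedged cross-check (illustrative, not part of the original file): for a small limit the
# sieve result above can be verified against a brute-force totient using math.gcd.
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so there are 21 reduced proper fractions with denominator <= 8.
from math import gcd


def totient_bruteforce(n: int) -> int:
    return sum(1 for k in range(1, n) if gcd(n, k) == 1)


assert sum(totient_bruteforce(d) for d in range(2, 9)) == 21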
'''simple docstring''' from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = ['image_processor', 'tokenizer'] lowerCamelCase_ = 'AutoImageProcessor' lowerCamelCase_ = 'AutoTokenizer' def __init__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any ): '''simple docstring''' super().__init__(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[Any] =self.image_processor def __call__( self : Optional[Any] , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : Any ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: lowercase : Dict =self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) if images is not None: lowercase : List[Any] =self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) if text is not None and images is not None: lowercase : str =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int ): '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BioGptTokenizer lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase : List[str] =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Dict ='''lower newer''' lowercase : str ='''lower newer''' return input_text, output_text def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file ) lowercase : Any ='''lower''' lowercase : int =['''low''', '''er</w>'''] lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[int] =tokens + ['''<unk>'''] lowercase : Any =[14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ ) lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ ) lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
88
1
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : List[Any]=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Any=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Optional[Any]=16 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Optional[Any]=None , ): '''simple docstring''' lowercase : Dict =parent lowercase : str =batch_size lowercase : Optional[Any] =seq_length lowercase : List[str] =is_training lowercase : List[Any] =use_input_mask lowercase : Union[str, Any] =use_token_type_ids lowercase : Optional[Any] =use_labels lowercase : Tuple =vocab_size lowercase : Union[str, Any] =hidden_size lowercase : List[Any] =num_hidden_layers lowercase : str =num_attention_heads lowercase : int =intermediate_size lowercase : Union[str, Any] =hidden_act lowercase : Dict =hidden_dropout_prob lowercase : List[Any] =attention_probs_dropout_prob lowercase : Union[str, Any] =max_position_embeddings lowercase : Optional[int] =type_vocab_size lowercase : Dict =type_sequence_label_size lowercase : Dict =initializer_range lowercase : str =num_labels lowercase : int =num_choices lowercase : Dict =scope def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : List[Any] =None if self.use_input_mask: lowercase : Dict =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : int =None if self.use_token_type_ids: lowercase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Dict =None lowercase : List[Any] =None lowercase : List[str] =None if self.use_labels: lowercase : Any =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Optional[int] =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Any ): '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : Optional[int] =NystromformerModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Any =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Any =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : Tuple =NystromformerForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Tuple =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] ): '''simple docstring''' lowercase : Optional[Any] =NystromformerForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] ): '''simple docstring''' lowercase : Optional[Any] =self.num_labels lowercase : Any =NystromformerForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[int] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : List[str] =self.num_labels lowercase : Optional[Any] =NystromformerForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) 
model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] ): '''simple docstring''' lowercase : Any =self.num_choices lowercase : List[str] =NystromformerForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Optional[int] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Union[str, Any] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Dict =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] =config_and_inputs lowercase : Dict ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': NystromformerModel, 'fill-mask': NystromformerForMaskedLM, 'question-answering': NystromformerForQuestionAnswering, 'text-classification': NystromformerForSequenceClassification, 'token-classification': NystromformerForTokenClassification, 'zero-shot': NystromformerForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[Any] =NystromformerModelTester(self ) lowercase : Tuple =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Dict =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def 
lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : str ): '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[Any] =NystromformerModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Dict =NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) lowercase : Any =torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowercase : Tuple =model(UpperCAmelCase__ )[0] lowercase : int =torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : Dict =torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[int] ='''the [MASK] of Belgium is Brussels''' lowercase : Dict =AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) lowercase : Optional[int] =NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) lowercase : List[Any] =tokenizer(UpperCAmelCase__ , return_tensors='''pt''' ) with torch.no_grad(): lowercase : Optional[int] =model(encoding.input_ids ).logits lowercase : Tuple =token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCAmelCase__ ) , '''capital''' )
88
'''simple docstring''' import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ): '''simple docstring''' lowercase : int =parent lowercase : List[str] =batch_size lowercase : str =seq_length lowercase : Optional[Any] =is_training lowercase : Union[str, Any] =use_attention_mask lowercase : Optional[Any] =use_token_type_ids lowercase : Tuple =use_labels lowercase : List[str] =vocab_size lowercase : List[str] =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Any =num_attention_heads lowercase : List[str] =intermediate_size lowercase : Optional[Any] =hidden_act lowercase : Dict =hidden_dropout_prob lowercase : List[Any] =attention_probs_dropout_prob lowercase : Optional[Any] =max_position_embeddings lowercase : Tuple =type_vocab_size lowercase : Optional[int] =type_sequence_label_size lowercase : Optional[Any] =initializer_range lowercase : Optional[int] =num_choices def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =None if self.use_attention_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Tuple =None if self.use_token_type_ids: lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : int =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : 
List[str] =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs lowercase : List[str] =True lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = True lowerCamelCase_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =FlaxRobertaModelTester(self ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ ) lowercase : List[Any] =model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase__ )
88
1
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> list[list[int]]: lowercase : list[list[int]] =[] create_all_state(1 , __magic_name__ , __magic_name__ , [] , __magic_name__ ) return result def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : list[list[int]] , ) -> None: if level == 0: total_list.append(current_list[:] ) return for i in range(__magic_name__ , total_number - level + 2 ): current_list.append(__magic_name__ ) create_all_state(i + 1 , __magic_name__ , level - 1 , __magic_name__ , __magic_name__ ) current_list.pop() def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> None: for i in total_list: print(*__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = 4 UpperCamelCase_ = 2 UpperCamelCase_ = generate_all_combinations(n, k) print_all_state(total_list)
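# Usage sketch (added for illustration, not part of the original file). With n = 4 and k = 2,
# the backtracking helper above builds every increasing 2-element combination of 1..4. A
# self-contained cross-check with itertools yields the same expected output:
from itertools import combinations

expected = [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
assert [list(c) for c in combinations(range(1, 5), 2)] == expected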
88
'''simple docstring''' import mpmath # for roots of unity import numpy as np class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ): '''simple docstring''' # Input as list lowercase : Optional[int] =list(poly_a or [0] )[:] lowercase : Optional[Any] =list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() lowercase : Any =len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() lowercase : Dict =len(self.polyB ) # Add 0 to make lengths equal a power of 2 lowercase : int =int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product lowercase : Tuple =self.__multiply() def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(UpperCAmelCase__ ) <= 1: return dft[0] # lowercase : Any =self.c_max_length // 2 while next_ncol > 0: lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )] lowercase : Tuple =self.root**next_ncol # First half of next step lowercase : str =1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step lowercase : int =1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update lowercase : Dict =new_dft lowercase : Tuple =next_ncol // 2 return dft[0] def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.__dft('''A''' ) lowercase : Any =self.__dft('''B''' ) lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT lowercase : Optional[int] =2 while next_ncol <= self.c_max_length: lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )] lowercase : List[str] =self.root ** (next_ncol // 2) lowercase : Optional[int] =1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update lowercase : List[Any] =new_inverse_c next_ncol *= 2 # Unpack lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Any ): '''simple docstring''' lowercase : Any ='''A = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) lowercase : Tuple ='''B = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) lowercase : List[str] ='''A*B = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.product 
) ) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
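# Worked example (added for illustration, not part of the original file). Multiplying
# (1 + 2x + 3x^2) by (3 + 4x) by hand gives 3 + 10x + 17x^2 + 12x^3, so the FFT-based
# product computed in __init__ above should recover the coefficients [3, 10, 17, 12]
# (possibly as rounded complex values). A direct O(n*m) cross-check:
def naive_poly_multiply(poly_a, poly_b):
    out = [0] * (len(poly_a) + len(poly_b) - 1)
    for i, a in enumerate(poly_a):
        for j, b in enumerate(poly_b):
            out[i + j] += a * b
    return out

assert naive_poly_multiply([1, 2, 3], [3, 4]) == [3, 10, 17, 12]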
88
1
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[int] ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : list[int] , __magic_name__ : int ) -> bool: # Base Case if curr_ind == len(__magic_name__ ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__magic_name__ ) ): if valid_connection(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): # Insert current vertex into path as next transition lowercase : Tuple =next_ver # Validate created path if util_hamilton_cycle(__magic_name__ , __magic_name__ , curr_ind + 1 ): return True # Backtrack lowercase : Dict =-1 return False def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int = 0 ) -> list[int]: lowercase : List[str] =[-1] * (len(__magic_name__ ) + 1) # initialize start and end of path with starting index lowercase : List[str] =start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__magic_name__ , __magic_name__ , 1 ) else []
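# Usage sketch (added for illustration, not part of the original file). For the adjacency
# matrix below there is a Hamiltonian cycle through vertex 0, e.g. 0 -> 1 -> 2 -> 4 -> 3 -> 0,
# so the top-level helper above would be expected to return [0, 1, 2, 4, 3, 0] (and [] when
# no cycle exists). A quick self-contained check that the candidate cycle is valid:
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
cycle = [0, 1, 2, 4, 3, 0]
assert all(graph[u][v] == 1 for u, v in zip(cycle, cycle[1:]))  # every hop is an edge
assert sorted(cycle[:-1]) == [0, 1, 2, 3, 4]  # every vertex visited exactly once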
88
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
88
1
'''simple docstring''' from math import isqrt, loga def _lowerCAmelCase ( __magic_name__ : int ) -> list[int]: lowercase : Dict =[True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __magic_name__ , __magic_name__ ): lowercase : Dict =False return [i for i in range(2 , __magic_name__ ) if is_prime[i]] def _lowerCAmelCase ( __magic_name__ : int = 800800 , __magic_name__ : int = 800800 ) -> int: lowercase : Dict =degree * loga(__magic_name__ ) lowercase : Optional[int] =int(__magic_name__ ) lowercase : List[str] =calculate_prime_numbers(__magic_name__ ) lowercase : List[Any] =0 lowercase : Optional[Any] =0 lowercase : Dict =len(__magic_name__ ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
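# Worked check (added for illustration, not part of the original file). The comparison in the
# two-pointer loop above is the log form of p**q * q**p <= bound, i.e.
# q*log2(p) + p*log2(q) <= degree*log2(number). The smallest such "hybrid" integer is
# 2**3 * 3**2 = 72, so the pair (2, 3) is counted exactly when the bound is at least 72:
from math import log2

p, q = 2, 3
assert p**q * q**p == 72
assert q * log2(p) + p * log2(q) <= log2(72) + 1e-9  # included once the bound reaches 72
assert not (q * log2(p) + p * log2(q) <= log2(64))  # excluded for a bound of 64 (= 8**2)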
88
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'vision-encoder-decoder' lowerCamelCase_ = True def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F'''A configuraton of type {self.model_type} cannot be instantiated because ''' F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) lowercase : Optional[Any] =kwargs.pop('''encoder''' ) lowercase : List[Any] =encoder_config.pop('''model_type''' ) lowercase : List[str] =kwargs.pop('''decoder''' ) lowercase : Dict =decoder_config.pop('''model_type''' ) lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : str =True @classmethod def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowercase : int =True lowercase : Optional[Any] =True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =copy.deepcopy(self.__dict__ ) lowercase : Union[str, Any] =self.encoder.to_dict() lowercase : Union[str, Any] =self.decoder.to_dict() lowercase : int =self.__class__.model_type return output class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = version.parse('1.11' ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return 1E-4 @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =OrderedDict() lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ): '''simple docstring''' import torch lowercase : Optional[Any] =OrderedDict() lowercase : List[Any] =super().generate_dummy_inputs( UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ ) lowercase , lowercase : Optional[int] 
=dummy_input['''input_ids'''].shape lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size) lowercase : List[str] =dummy_input.pop('''input_ids''' ) lowercase : Tuple =dummy_input.pop('''attention_mask''' ) lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ ) return common_inputs class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ): '''simple docstring''' lowercase : List[Any] =encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
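# Usage sketch (added for illustration; this assumes the config class above corresponds to
# transformers' VisionEncoderDecoderConfig, whose public API it mirrors):
#
#   from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#   # the decoder sub-config is switched into decoder mode with cross-attention enabled,
#   # matching the classmethod above that sets is_decoder=True and add_cross_attention=True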
88
1
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = OrderedDict( [ ("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""), ("""beit""", """BeitFeatureExtractor"""), ("""chinese_clip""", """ChineseCLIPFeatureExtractor"""), ("""clap""", """ClapFeatureExtractor"""), ("""clip""", """CLIPFeatureExtractor"""), ("""clipseg""", """ViTFeatureExtractor"""), ("""conditional_detr""", """ConditionalDetrFeatureExtractor"""), ("""convnext""", """ConvNextFeatureExtractor"""), ("""cvt""", """ConvNextFeatureExtractor"""), ("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""), ("""data2vec-vision""", """BeitFeatureExtractor"""), ("""deformable_detr""", """DeformableDetrFeatureExtractor"""), ("""deit""", """DeiTFeatureExtractor"""), ("""detr""", """DetrFeatureExtractor"""), ("""dinat""", """ViTFeatureExtractor"""), ("""donut-swin""", """DonutFeatureExtractor"""), ("""dpt""", """DPTFeatureExtractor"""), ("""encodec""", """EncodecFeatureExtractor"""), ("""flava""", """FlavaFeatureExtractor"""), ("""glpn""", """GLPNFeatureExtractor"""), ("""groupvit""", """CLIPFeatureExtractor"""), ("""hubert""", """Wav2Vec2FeatureExtractor"""), ("""imagegpt""", """ImageGPTFeatureExtractor"""), ("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""), ("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""), ("""levit""", """LevitFeatureExtractor"""), ("""maskformer""", """MaskFormerFeatureExtractor"""), ("""mctct""", """MCTCTFeatureExtractor"""), ("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""), ("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""), ("""mobilevit""", """MobileViTFeatureExtractor"""), ("""nat""", """ViTFeatureExtractor"""), ("""owlvit""", """OwlViTFeatureExtractor"""), ("""perceiver""", """PerceiverFeatureExtractor"""), ("""poolformer""", """PoolFormerFeatureExtractor"""), ("""regnet""", """ConvNextFeatureExtractor"""), ("""resnet""", """ConvNextFeatureExtractor"""), ("""segformer""", """SegformerFeatureExtractor"""), ("""sew""", """Wav2Vec2FeatureExtractor"""), ("""sew-d""", """Wav2Vec2FeatureExtractor"""), ("""speech_to_text""", """Speech2TextFeatureExtractor"""), ("""speecht5""", """SpeechT5FeatureExtractor"""), ("""swiftformer""", """ViTFeatureExtractor"""), ("""swin""", """ViTFeatureExtractor"""), ("""swinv2""", """ViTFeatureExtractor"""), ("""table-transformer""", """DetrFeatureExtractor"""), ("""timesformer""", """VideoMAEFeatureExtractor"""), ("""tvlt""", """TvltFeatureExtractor"""), ("""unispeech""", """Wav2Vec2FeatureExtractor"""), ("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""), ("""van""", """ConvNextFeatureExtractor"""), ("""videomae""", """VideoMAEFeatureExtractor"""), ("""vilt""", """ViltFeatureExtractor"""), ("""vit""", """ViTFeatureExtractor"""), ("""vit_mae""", """ViTFeatureExtractor"""), ("""vit_msn""", """ViTFeatureExtractor"""), ("""wav2vec2""", """Wav2Vec2FeatureExtractor"""), ("""wav2vec2-conformer""", 
"""Wav2Vec2FeatureExtractor"""), ("""wavlm""", """Wav2Vec2FeatureExtractor"""), ("""whisper""", """WhisperFeatureExtractor"""), ("""xclip""", """CLIPFeatureExtractor"""), ("""yolos""", """YolosFeatureExtractor"""), ] ) UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def _lowerCAmelCase ( __magic_name__ : str ) -> int: for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: lowercase : int =model_type_to_module_name(__magic_name__ ) lowercase : int =importlib.import_module(f'''.{module_name}''' , '''transformers.models''' ) try: return getattr(__magic_name__ , __magic_name__ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(__magic_name__ , '''__name__''' , __magic_name__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowercase : Any =importlib.import_module('''transformers''' ) if hasattr(__magic_name__ , __magic_name__ ): return getattr(__magic_name__ , __magic_name__ ) return None def _lowerCAmelCase ( __magic_name__ : Union[str, os.PathLike] , __magic_name__ : Optional[Union[str, os.PathLike]] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : Optional[Dict[str, str]] = None , __magic_name__ : Optional[Union[bool, str]] = None , __magic_name__ : Optional[str] = None , __magic_name__ : bool = False , **__magic_name__ : List[str] , ) -> str: lowercase : str =get_file_from_repo( __magic_name__ , __magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , resume_download=__magic_name__ , proxies=__magic_name__ , use_auth_token=__magic_name__ , revision=__magic_name__ , local_files_only=__magic_name__ , ) if resolved_config_file is None: logger.info( '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' ) return {} with open(__magic_name__ , encoding='''utf-8''' ) as reader: return json.load(__magic_name__ ) class __SCREAMING_SNAKE_CASE : def __init__( self : Tuple ): '''simple docstring''' raise EnvironmentError( '''AutoFeatureExtractor is designed to be instantiated ''' '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(UpperCAmelCase__ ) def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : str =kwargs.pop('''config''' , UpperCAmelCase__ ) lowercase : List[Any] =kwargs.pop('''trust_remote_code''' , UpperCAmelCase__ ) lowercase : Optional[int] =True lowercase , lowercase : Optional[int] =FeatureExtractionMixin.get_feature_extractor_dict(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Optional[Any] =config_dict.get('''feature_extractor_type''' , UpperCAmelCase__ ) lowercase : Optional[Any] =None if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): lowercase : int =config_dict['''auto_map''']['''AutoFeatureExtractor'''] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Dict =AutoConfig.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) # It could be in `config.feature_extractor_type`` lowercase : int =getattr(UpperCAmelCase__ , '''feature_extractor_type''' , UpperCAmelCase__ ) if hasattr(UpperCAmelCase__ , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map: lowercase : int =config.auto_map['''AutoFeatureExtractor'''] if feature_extractor_class is not None: lowercase : int =feature_extractor_class_from_name(UpperCAmelCase__ ) lowercase : List[str] =feature_extractor_auto_map is not None lowercase : List[Any] =feature_extractor_class is not None or type(UpperCAmelCase__ ) in FEATURE_EXTRACTOR_MAPPING lowercase : Any =resolve_trust_remote_code( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if has_remote_code and trust_remote_code: lowercase : Any =get_class_from_dynamic_module( UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Union[str, Any] =kwargs.pop('''code_revision''' , UpperCAmelCase__ ) if os.path.isdir(UpperCAmelCase__ ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(UpperCAmelCase__ ) in FEATURE_EXTRACTOR_MAPPING: lowercase : Optional[int] =FEATURE_EXTRACTOR_MAPPING[type(UpperCAmelCase__ )] return feature_extractor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowerCamelCase_ ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int ): '''simple docstring''' FEATURE_EXTRACTOR_MAPPING.register(UpperCAmelCase__ , UpperCAmelCase__ )
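# Usage sketch (added for illustration; AutoFeatureExtractor is the public transformers API
# implemented by this file). Loading by checkpoint name resolves the concrete class through
# the FEATURE_EXTRACTOR_MAPPING_NAMES table defined above:
#
#   from transformers import AutoFeatureExtractor
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   # -> a Wav2Vec2FeatureExtractor instance, per the "wav2vec2" entry in the mapping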
88
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) UpperCamelCase_ = logging.getLogger(__name__) UpperCamelCase_ = tf.data.AUTOTUNE def _lowerCAmelCase ( ) -> Any: lowercase : Dict =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' ) parser.add_argument( '''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , ) parser.add_argument( '''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , ) parser.add_argument( '''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , ) parser.add_argument( '''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , ) parser.add_argument( '''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , ) parser.add_argument( '''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , ) parser.add_argument( '''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' ) parser.add_argument( '''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , ) parser.add_argument( '''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`''' ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , ) parser.add_argument( '''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , ) parser.add_argument( '''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`''' ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , ) parser.add_argument( '''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , ) parser.add_argument( '''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , ) parser.add_argument( '''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , ) parser.add_argument( '''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py''' , ) parser.add_argument( '''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , ) parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' ) parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' ) lowercase : Union[str, Any] =parser.parse_args() return args def _lowerCAmelCase ( __magic_name__ : List[str] ) -> List[Any]: try: if args.tpu_name: lowercase : Dict =tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: lowercase : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or ''' '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' ) tf.config.experimental_connect_to_cluster(__magic_name__ ) tf.tpu.experimental.initialize_tpu_system(__magic_name__ ) return tpu def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]: lowercase : str =0 for file in file_list: lowercase : List[str] =file.split('''/''' )[-1] lowercase : Union[str, Any] =re.search(R'''-\d+-(\d+)\.tfrecord''' , __magic_name__ ).group(1 ) lowercase : int =int(__magic_name__ ) num_samples += sample_count return num_samples def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> str: lowercase : int =count_samples(__magic_name__ ) lowercase : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__magic_name__ ) if shuffle: lowercase : Union[str, Any] =dataset.shuffle(len(__magic_name__ ) ) lowercase : Any =tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here lowercase : Optional[int] =dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) ) lowercase : str =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ ) if shuffle: assert shuffle_buffer_size is not None lowercase : int =dataset.shuffle(args.shuffle_buffer_size ) lowercase : Optional[int] =dataset.batch(__magic_name__ , drop_remainder=__magic_name__ ) lowercase : int =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ ) lowercase : Union[str, Any] =dataset.prefetch(__magic_name__ ) return dataset def _lowerCAmelCase ( __magic_name__ : Any ) -> str: if not args.no_tpu: lowercase : Optional[Any] =initialize_tpu(__magic_name__ ) lowercase : Any =tf.distribute.TPUStrategy(__magic_name__ ) else: lowercase : Optional[Any] =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' ) lowercase : Any =AutoTokenizer.from_pretrained(args.tokenizer ) lowercase : Union[str, Any] =AutoConfig.from_pretrained(args.pretrained_model_config ) lowercase : Optional[Any] =tokenizer.vocab_size lowercase : str =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) ) if not training_records: raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' ) lowercase : Optional[int] =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) ) if not eval_records: 
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' ) lowercase : Any =count_samples(__magic_name__ ) lowercase : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) lowercase : Union[str, Any] =steps_per_epoch * args.num_epochs with strategy.scope(): lowercase : List[Any] =TFAutoModelForMaskedLM.from_config(__magic_name__ ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built lowercase , lowercase : Dict =create_optimizer( num_train_steps=__magic_name__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__magic_name__ , metrics=['''accuracy'''] ) def decode_fn(__magic_name__ : Optional[Any] ): lowercase : Union[str, Any] ={ '''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(__magic_name__ , __magic_name__ ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. lowercase : str =DataCollatorForLanguageModeling( tokenizer=__magic_name__ , mlm_probability=args.mlm_probability , mlm=__magic_name__ , return_tensors='''tf''' ) def mask_with_collator(__magic_name__ : Dict ): # TF really needs an isin() function lowercase : int =( ~tf.cast(batch['''attention_mask'''] , tf.bool ) | (batch['''input_ids'''] == tokenizer.cls_token_id) | (batch['''input_ids'''] == tokenizer.sep_token_id) ) lowercase , lowercase : Union[str, Any] =data_collator.tf_mask_tokens( batch['''input_ids'''] , vocab_size=len(__magic_name__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__magic_name__ , ) return batch lowercase : List[str] =args.per_replica_batch_size * strategy.num_replicas_in_sync lowercase : Dict =prepare_dataset( __magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , shuffle_buffer_size=args.shuffle_buffer_size , ) lowercase : Union[str, Any] =prepare_dataset( __magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , ) lowercase : Tuple =[] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__magic_name__ ) ) model.fit( __magic_name__ , validation_data=__magic_name__ , epochs=args.num_epochs , callbacks=__magic_name__ , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": UpperCamelCase_ = parse_args() main(args)
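# Invocation sketch (added for illustration). Given the argparse options defined above, a
# typical run looks roughly like the following; the script filename and the GCS paths are
# placeholders:
#
#   python run_mlm_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir gs://my-bucket/checkpoints \
#       --bfloat16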
88
1
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = """▁""" UpperCamelCase_ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", """tokenizer_config_file""": """tokenizer_config.json""", } UpperCamelCase_ = { """vocab_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""", }, """spm_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_config_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""", }, } UpperCamelCase_ = { """facebook/m2m100_418M""": 1024, } # fmt: off UpperCamelCase_ = { """m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""], """wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""] } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = ['input_ids', 'attention_mask'] lowerCamelCase_ = [] lowerCamelCase_ = [] def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Tuple="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Union[str, Any]="<pad>" , UpperCAmelCase__ : Tuple="<unk>" , UpperCAmelCase__ : str="m2m100" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , UpperCAmelCase__ : Union[str, Any]=8 , **UpperCAmelCase__ : Optional[int] , ): '''simple docstring''' lowercase : Dict ={} if sp_model_kwargs is None else sp_model_kwargs lowercase : Any =language_codes lowercase : Optional[Any] =FAIRSEQ_LANGUAGE_CODES[language_codes] lowercase : Tuple ={lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code} 
lowercase : int =kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(UpperCAmelCase__ ) for lang_code in fairseq_language_code if self.get_lang_token(UpperCAmelCase__ ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , language_codes=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowercase : Union[str, Any] =vocab_file lowercase : List[str] =load_json(UpperCAmelCase__ ) lowercase : List[str] ={v: k for k, v in self.encoder.items()} lowercase : Optional[Any] =spm_file lowercase : Dict =load_spm(UpperCAmelCase__ , self.sp_model_kwargs ) lowercase : Optional[Any] =len(self.encoder ) lowercase : Dict ={ self.get_lang_token(UpperCAmelCase__ ): self.encoder_size + i for i, lang_code in enumerate(UpperCAmelCase__ ) } lowercase : Union[str, Any] ={lang_code: self.encoder_size + i for i, lang_code in enumerate(UpperCAmelCase__ )} lowercase : List[Any] ={v: k for k, v in self.lang_token_to_id.items()} lowercase : Optional[int] =src_lang if src_lang is not None else '''en''' lowercase : Any =tgt_lang lowercase : str =self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) lowercase : Optional[int] =num_madeup_words @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return len(self.encoder ) + len(self.lang_token_to_id ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self._src_lang @src_lang.setter def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : Tuple =new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str ): '''simple docstring''' return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(UpperCAmelCase__ , self.encoder[self.unk_token] ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int ): '''simple docstring''' if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(UpperCAmelCase__ , self.unk_token ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Union[str, Any] =[] lowercase : Union[str, Any] ='''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCAmelCase__ ) + token lowercase : Any =[] else: current_sub_tokens.append(UpperCAmelCase__ ) out_string += self.sp_model.decode(UpperCAmelCase__ ) return out_string.strip() def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) lowercase : int =[1] * len(self.prefix_tokens ) lowercase : List[Any] =[1] * len(self.suffix_tokens ) if token_ids_a is None: return 
prefix_ones + ([0] * len(UpperCAmelCase__ )) + suffix_ones return prefix_ones + ([0] * len(UpperCAmelCase__ )) + ([0] * len(UpperCAmelCase__ )) + suffix_ones def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : str ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ): '''simple docstring''' lowercase : List[str] =self.__dict__.copy() lowercase : int =None return state def __setstate__( self : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : Optional[int] =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : Dict ={} lowercase : str =load_spm(self.spm_file , self.sp_model_kwargs ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ): '''simple docstring''' lowercase : Tuple =Path(UpperCAmelCase__ ) if not save_dir.is_dir(): raise OSError(F'''{save_directory} should be a directory''' ) lowercase : Any =save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) lowercase : Any =save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , UpperCAmelCase__ ) if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , UpperCAmelCase__ ) elif not os.path.isfile(self.spm_file ): with open(UpperCAmelCase__ , '''wb''' ) as fi: lowercase : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (str(UpperCAmelCase__ ), str(UpperCAmelCase__ )) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str = "en" , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : str = "ro" , **UpperCAmelCase__ : Any , ): '''simple docstring''' lowercase : List[str] =src_lang lowercase : Tuple =tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] , **UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) lowercase : Tuple =src_lang lowercase : Union[str, Any] =self(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : int =self.get_lang_id(UpperCAmelCase__ ) lowercase : Optional[Any] =tgt_lang_id return inputs def lowerCamelCase_ ( self : Dict ): '''simple docstring''' self.set_src_lang_special_tokens(self.src_lang ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : Dict =self.get_lang_token(UpperCAmelCase__ ) lowercase : 
Optional[int] =self.lang_token_to_id[lang_token] lowercase : Dict =[self.cur_lang_id] lowercase : Optional[Any] =[self.eos_token_id] def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : Optional[Any] =self.get_lang_token(UpperCAmelCase__ ) lowercase : Union[str, Any] =self.lang_token_to_id[lang_token] lowercase : Any =[self.cur_lang_id] lowercase : Any =[self.eos_token_id] def lowerCamelCase_ ( self : str , UpperCAmelCase__ : str ): '''simple docstring''' return self.lang_code_to_token[lang] def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =self.get_lang_token(UpperCAmelCase__ ) return self.lang_token_to_id[lang_token] def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: lowercase : Dict =sentencepiece.SentencePieceProcessor(**__magic_name__ ) spm.Load(str(__magic_name__ ) ) return spm def _lowerCAmelCase ( __magic_name__ : str ) -> Union[Dict, List]: with open(__magic_name__ , '''r''' ) as f: return json.load(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : str ) -> None: with open(__magic_name__ , '''w''' ) as f: json.dump(__magic_name__ , __magic_name__ , indent=2 )
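# A minimal usage sketch for the multilingual tokenizer implemented above, assuming it is
# exposed through the transformers M2M100 tokenizer API; the checkpoint name below is an
# assumption used only for illustration.
from transformers import M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
encoded = tokenizer("Hello world", return_tensors="pt")   # input_ids start with the __en__ language token
forced_bos = tokenizer.get_lang_id("fr")                   # pass as forced_bos_token_id when generating French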
88
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys UpperCamelCase_ = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
88
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
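# The init file above registers FNet symbols with _LazyModule so heavy submodules are only
# imported on first attribute access. Below is a simplified, self-contained sketch of the
# same idea using PEP 562 module-level __getattr__; the stdlib modules in the mapping are
# placeholders chosen only so the sketch runs, not part of the real _LazyModule machinery.
import importlib

_lazy_import_structure = {"json": ["dumps"], "math": ["sqrt"]}  # placeholder modules for the sketch

def __getattr__(name):
    # resolve the owning module lazily, import it, and return the requested symbol
    for module_name, symbols in _lazy_import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")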
88
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = """▁""" UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCamelCase_ = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCamelCase_ = { """facebook/xglm-564M""": 2048, } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = ['input_ids', 'attention_mask'] def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ): '''simple docstring''' lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase : Optional[Any] =7 lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase__ ) ) lowercase : List[Any] =vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase : Union[str, Any] =1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase : str =len(self.sp_model ) lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(UpperCAmelCase__ ) lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ): '''simple docstring''' lowercase : Optional[int] =self.__dict__.copy() lowercase : List[Any] =None lowercase : Tuple =self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : int =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : Optional[int] ={} lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase : List[Any] =[self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase__ )) return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ )) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : int =[self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ): '''simple docstring''' return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ 
, ''' ''' ).strip() return out_string def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : Dict =os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , '''wb''' ) as fi: lowercase : Optional[int] =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,)
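# A small worked example of the fairseq/sentencepiece id alignment documented in the
# vocabulary comment above: fairseq reserves ids 0-3 for <s>, <pad>, </s>, <unk>, so every
# regular sentencepiece id is shifted by an offset of 1 (the first real token "," is spm
# id 3 and fairseq id 4).
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def spm_id_to_fairseq_id(spm_id: int) -> int:
    # spm id 0 is <unk>; map it onto the reserved fairseq <unk> id instead of shifting it
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert spm_id_to_fairseq_id(3) == 4  # ","
assert spm_id_to_fairseq_id(0) == 3  # <unk>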
88
1
'''simple docstring''' import torch def _lowerCAmelCase ( ) -> Any: if torch.cuda.is_available(): lowercase : Any =torch.cuda.device_count() else: lowercase : Optional[Any] =0 print(f'''Successfully ran on {num_gpus} GPUs''' ) if __name__ == "__main__": main()
88
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _lowerCAmelCase ( __magic_name__ : str ) -> Union[str, Any]: lowercase : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase : List[str] =json.loads(open(__magic_name__ ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): lowercase : Tuple =args.output + '''.pt''' lowercase : int =OrderedDict() with tf.device('''/CPU:0''' ): lowercase : List[Any] =tf.train.load_checkpoint(args.tf_model_dir ) lowercase : int =reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase : Any =reader.get_tensor(__magic_name__ ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase : int =int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase : Union[str, Any] =8 lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/moe''' ): lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase : Union[str, Any] =key_name[-9:-7] for i in range(16 ): lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase : Any =( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/mlp''' ): lowercase : Dict =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Any =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p1/bias''' ): lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p2/kernel''' ): lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : int =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p2/bias''' ): lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase : Optional[int] =vnp.copy() # same because it is one dimensional 
lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/ln''' ): lowercase : int =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player lowercase : Optional[int] =vnp.copy() # same because it is one dimensional lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/g''' ): lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player lowercase : Any =vnp.copy() # same because it is one dimensional lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/att''' ): lowercase : int =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase : Dict =state[:, 0, :, :] lowercase : Tuple =state[:, 1, :, :] lowercase : List[Any] =state[:, 2, :, :] lowercase : Optional[int] =( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[int] =( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase : Dict =torch.tensor(__magic_name__ ) lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase : Optional[Any] =torch.tensor(__magic_name__ ) lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase : Tuple =torch.tensor(__magic_name__ ) elif key_name.endswith('''/o/kernel''' ): lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase : List[Any] =( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : str =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/an''' ): lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/g''' ): lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player lowercase : Any =vnp.copy() # same because it is one dimensional lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase : Optional[Any] ='''model.%s.weight''' % nlayer lowercase : Optional[int] =vnp.copy() # same in embedded lowercase : List[Any] =torch.tensor(__magic_name__ ) if key_name.startswith('''model/wte''' ): lowercase : Tuple ='''lm_head.weight''' lowercase : str =vnp.copy() # same in embedded lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/wob''' ): lowercase : List[str] ='''final_logits_bias''' lowercase : Dict 
=vnp.copy() # same in embedded lowercase : Tuple =state.reshape((1, -1) ) lowercase : Dict =torch.tensor(__magic_name__ ) elif key_name == "model/dense/kernel": lowercase : Dict ='''model.last_project.weight''' lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif key_name == "model/dense_1/bias": lowercase : List[Any] ='''model.last_project.bias''' lowercase : str =vnp.copy() # same because it is one dimensional lowercase : Optional[Any] =torch.tensor(__magic_name__ ) torch.save(__magic_name__ , args.output ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser( description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""") parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""") UpperCamelCase_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
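# The conversion above repeatedly transposes dense kernels because TensorFlow stores a
# Dense kernel as [in_features, out_features] while torch.nn.Linear.weight is laid out as
# [out_features, in_features]. A self-contained sketch of that single step, with shapes
# chosen arbitrarily for illustration:
import numpy as np
import torch

tf_kernel = np.random.rand(1024, 256).astype(np.float32)      # [in, out] as loaded from the checkpoint
pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())  # [out, in] expected by nn.Linear

linear = torch.nn.Linear(1024, 256, bias=False)
linear.weight.data.copy_(pt_weight)
assert linear.weight.shape == (256, 1024)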
88
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase_ = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
'''simple docstring''' import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = """▁""" UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BigBirdTokenizer lowerCamelCase_ = BigBirdTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = True def lowerCamelCase_ ( self : Any ): '''simple docstring''' super().setUp() lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''<s>''' lowercase : int =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(UpperCAmelCase__ ) , 1004 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase : Optional[int] =self.get_tokenizer() lowercase : Any =self.get_rust_tokenizer() lowercase : int ='''I was born in 92000, and this is falsé.''' lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ ) lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[Any] =self.get_rust_tokenizer() lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) lowercase : Tuple =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , ) lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowercase : Any 
=tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def lowerCamelCase_ ( self : str ): '''simple docstring''' return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str ='''Hello World!''' lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : int =( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase : Dict =''' '''.join(UpperCAmelCase__ ) lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Dict =self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' ) lowercase : Dict =BigBirdModel(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' # fmt: off lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 
452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
88
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : List[str] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : str ): '''simple docstring''' warnings.warn( '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use DeformableDetrImageProcessor instead.''' , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
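# The shim above keeps the old feature-extractor name importable while steering users to
# the image-processor class. A minimal sketch of that deprecation pattern with generic
# placeholder names (NewProcessor / OldProcessor are not real transformers classes):
import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        self.args = args

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        # warn once at construction time, then defer to the replacement implementation
        warnings.warn(
            "OldProcessor is deprecated; please use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)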
88
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] ) -> str: lowercase : Optional[Any] =[0 for i in range(r + 1 )] # nc0 = 1 lowercase : Optional[Any] =1 for i in range(1 , n + 1 ): # to compute current row from previous row. lowercase : str =min(__magic_name__ , __magic_name__ ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
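# A self-contained sketch of the same Pascal's-row technique with explicit names, shown
# alongside the file above for illustration: a single row `c` is updated in place from
# right to left so that c[j] ends up holding C(i, j).
def binomial(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # C(i, 0) == 1 for every row
    for i in range(1, n + 1):
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]  # C(i, j) = C(i-1, j) + C(i-1, j-1)
            j -= 1
    return c[r]

assert binomial(10, 5) == 252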
88
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""", """funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""", """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""", """funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""", } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'funnel' lowerCamelCase_ = { 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', } def __init__( self : List[str] , UpperCAmelCase__ : Dict=30522 , UpperCAmelCase__ : Dict=[4, 4, 4] , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : List[str]=768 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Dict=64 , UpperCAmelCase__ : str=3072 , UpperCAmelCase__ : int="gelu_new" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : str=1E-9 , UpperCAmelCase__ : int="mean" , UpperCAmelCase__ : Tuple="relative_shift" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=True , **UpperCAmelCase__ : int , ): '''simple docstring''' lowercase : List[str] =vocab_size lowercase : Dict =block_sizes lowercase : List[str] =[1] * len(UpperCAmelCase__ ) if block_repeats is None else block_repeats assert len(UpperCAmelCase__ ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
lowercase : List[str] =num_decoder_layers lowercase : Union[str, Any] =d_model lowercase : List[str] =n_head lowercase : List[str] =d_head lowercase : Dict =d_inner lowercase : Any =hidden_act lowercase : str =hidden_dropout lowercase : Optional[int] =attention_dropout lowercase : List[Any] =activation_dropout lowercase : List[str] =initializer_range lowercase : Union[str, Any] =initializer_std lowercase : Dict =layer_norm_eps assert pooling_type in [ "mean", "max", ], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' lowercase : Optional[Any] =pooling_type assert attention_type in [ "relative_shift", "factorized", ], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' lowercase : str =attention_type lowercase : List[str] =separate_cls lowercase : Tuple =truncate_seq lowercase : List[Any] =pool_q_only super().__init__(**UpperCAmelCase__ ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return sum(self.block_sizes ) @num_hidden_layers.setter def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ): '''simple docstring''' raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' ) @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return len(self.block_sizes ) @num_blocks.setter def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ): '''simple docstring''' raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
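# A minimal usage sketch for the configuration class above, assuming it is exposed as
# transformers.FunnelConfig: the layer counts are derived from block_sizes rather than set
# directly, which is what the read-only properties enforce.
from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4])
assert config.num_hidden_layers == sum(config.block_sizes)  # 12, derived rather than set
assert config.num_blocks == len(config.block_sizes)         # 3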
88
'''simple docstring''' from collections import defaultdict def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> bool: lowercase : Optional[int] =first_str.lower().strip() lowercase : Union[str, Any] =second_str.lower().strip() # Remove whitespace lowercase : Optional[int] =first_str.replace(''' ''' , '''''' ) lowercase : Optional[Any] =second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(__magic_name__ ) != len(__magic_name__ ): return False # Default values for count should be 0 lowercase : defaultdict[str, int] =defaultdict(__magic_name__ ) # For each character in input strings, # increment count in the corresponding for i in range(len(__magic_name__ ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCamelCase_ = input("""Enter the first string """).strip() UpperCamelCase_ = input("""Enter the second string """).strip() UpperCamelCase_ = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
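# The defaultdict bookkeeping above can also be expressed with collections.Counter; a
# small equivalent sketch for comparison, using the same normalisation (lowercase, strip
# spaces) and giving the same verdicts:
from collections import Counter

def is_anagram(first: str, second: str) -> bool:
    first = first.lower().replace(" ", "")
    second = second.lower().replace(" ", "")
    return Counter(first) == Counter(second)

assert is_anagram("Silent", "Listen")
assert not is_anagram("Silent", "Listens")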
88
1
'''simple docstring''' from __future__ import annotations from dataclasses import dataclass @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 42 lowerCamelCase_ = None lowerCamelCase_ = None def _lowerCAmelCase ( __magic_name__ : TreeNode | None ) -> bool: # Validation def is_valid_tree(__magic_name__ : TreeNode | None ) -> bool: if node is None: return True if not isinstance(__magic_name__ , __magic_name__ ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(__magic_name__ ): raise ValueError( '''Each node should be type of TreeNode and data should be float.''' ) def is_binary_search_tree_recursive_check( __magic_name__ : TreeNode | None , __magic_name__ : float , __magic_name__ : float ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , __magic_name__ , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , __magic_name__ ) ) return is_binary_search_tree_recursive_check(__magic_name__ , -float('''inf''' ) , float('''inf''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
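# A tiny worked example of the recursive bound-passing check above, written against a
# minimal stand-in node type so the sketch stays self-contained (the file above uses its
# own TreeNode dataclass):
from dataclasses import dataclass

@dataclass
class Node:
    data: float
    left: "Node | None" = None
    right: "Node | None" = None

def is_bst(node: "Node | None", low: float = float("-inf"), high: float = float("inf")) -> bool:
    if node is None:
        return True
    # every node must lie strictly between the bounds inherited from its ancestors
    return (
        low < node.data < high
        and is_bst(node.left, low, node.data)
        and is_bst(node.right, node.data, high)
    )

assert is_bst(Node(2.0, Node(1.0), Node(3.0)))
assert not is_bst(Node(2.0, Node(3.0), Node(1.0)))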
88
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = None lowerCamelCase_ = BloomTokenizerFast lowerCamelCase_ = BloomTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 'tokenizer_file' lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' super().setUp() lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : str =self.get_rust_tokenizer() lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids'''] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input lowercase : Tuple ='''This is a simple input''' lowercase : int =['''This is a simple input 1''', '''This is a simple input 2'''] lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''') lowercase : int =[ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) lowercase : Optional[int] =None # Hotfixing padding = None self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises( UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) # Pair input self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises(UpperCAmelCase__ , 
tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises( UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_rust_tokenizer() lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ ) lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data lowercase : int =list(sample_data.values() ) lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) ) lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
88
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : Any ) -> List[str]: # initialize config if "resnet-50" in model_name: lowercase : Any =ResNetConfig.from_pretrained('''microsoft/resnet-50''' ) elif "resnet-101" in model_name: lowercase : Any =ResNetConfig.from_pretrained('''microsoft/resnet-101''' ) else: raise ValueError('''Model name should include either resnet50 or resnet101''' ) lowercase : Union[str, Any] =DetrConfig(use_timm_backbone=__magic_name__ , backbone_config=__magic_name__ ) # set label attributes lowercase : List[str] ='''panoptic''' in model_name if is_panoptic: lowercase : Any =250 else: lowercase : Any =91 lowercase : Tuple ='''huggingface/label-files''' lowercase : Any ='''coco-detection-id2label.json''' lowercase : Optional[Any] =json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) ) lowercase : Optional[int] ={int(__magic_name__ ): v for k, v in idalabel.items()} lowercase : Any =idalabel lowercase : Tuple ={v: k for k, v in idalabel.items()} return config, is_panoptic def _lowerCAmelCase ( __magic_name__ : Tuple ) -> str: # here we list all keys to be renamed (original name on the left, our name on the right) lowercase : Dict =[] # stem # fmt: off rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') ) rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') ) rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') ) rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') ) rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) 
) # 3 convs for i in range(3 ): rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', 
f'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ] ) return rename_keys def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: lowercase : List[str] =state_dict.pop(__magic_name__ ) lowercase : Tuple =val def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Dict=False ) -> Optional[Any]: lowercase : Union[str, Any] ='''''' if is_panoptic: lowercase : str ='''detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowercase : Union[str, Any] =state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) lowercase : Tuple =state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase : Dict =in_proj_weight[:256, :] lowercase : int =in_proj_bias[:256] lowercase : Optional[int] =in_proj_weight[256:512, :] lowercase : Dict =in_proj_bias[256:512] lowercase : Union[str, Any] =in_proj_weight[-256:, :] lowercase : Optional[Any] =in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention lowercase : List[str] =state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) lowercase : Optional[Any] =state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase : Any =in_proj_weight[:256, :] lowercase : Dict =in_proj_bias[:256] lowercase : Union[str, Any] =in_proj_weight[256:512, :] 
lowercase : Any =in_proj_bias[256:512] lowercase : Dict =in_proj_weight[-256:, :] lowercase : List[Any] =in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention lowercase : Dict =state_dict.pop( f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) lowercase : int =state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict lowercase : str =in_proj_weight_cross_attn[:256, :] lowercase : Tuple =in_proj_bias_cross_attn[:256] lowercase : Tuple =in_proj_weight_cross_attn[256:512, :] lowercase : Tuple =in_proj_bias_cross_attn[256:512] lowercase : Tuple =in_proj_weight_cross_attn[-256:, :] lowercase : List[str] =in_proj_bias_cross_attn[-256:] def _lowerCAmelCase ( ) -> Optional[int]: lowercase : List[Any] ='''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : List[Any] =Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : str=None , __magic_name__ : Optional[Any]=False ) -> Dict: lowercase , lowercase : Tuple =get_detr_config(__magic_name__ ) # load original model from torch hub lowercase : List[str] ={ '''detr-resnet-50''': '''detr_resnet50''', '''detr-resnet-101''': '''detr_resnet101''', } logger.info(f'''Converting model {model_name}...''' ) lowercase : Any =torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=__magic_name__ ).eval() lowercase : List[Any] =detr.state_dict() # rename keys for src, dest in create_rename_keys(__magic_name__ ): if is_panoptic: lowercase : Optional[int] ='''detr.''' + src rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) # query, key and value matrices need special treatment read_in_q_k_v(__magic_name__ , is_panoptic=__magic_name__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowercase : Optional[Any] ='''detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('''detr''' ) and not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ) ): lowercase : Dict =state_dict.pop(__magic_name__ ) lowercase : Dict =val elif "class_labels_classifier" in key or "bbox_predictor" in key: lowercase : Optional[int] =state_dict.pop(__magic_name__ ) lowercase : str =val elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ): continue else: lowercase : Dict =state_dict.pop(__magic_name__ ) lowercase : Union[str, Any] =val else: if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): lowercase : List[str] =state_dict.pop(__magic_name__ ) lowercase : Optional[int] =val # finally, create HuggingFace model and load state dict lowercase : str =DetrForSegmentation(__magic_name__ ) if is_panoptic else DetrForObjectDetection(__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() # verify our conversion on an image lowercase : int ='''coco_panoptic''' if is_panoptic else '''coco_detection''' lowercase : Any =DetrImageProcessor(format=__magic_name__ ) lowercase : List[Any] =processor(images=prepare_img() , return_tensors='''pt''' ) lowercase : Optional[int] =encoding['''pixel_values'''] lowercase : Optional[int] =detr(__magic_name__ ) lowercase : int 
=model(__magic_name__ ) assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if push_to_hub: # Upload model and image processor to the hub logger.info('''Uploading PyTorch model and image processor to the hub...''' ) model.push_to_hub(f'''nielsr/{model_name}''' ) processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""detr-resnet-50""", type=str, choices=["""detr-resnet-50""", """detr-resnet-101"""], help="""Name of the DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""") UpperCamelCase_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
88
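The q/k/v handling in the DETR conversion above exploits the fact that torch.nn.MultiheadAttention stores the three projections as one fused in_proj matrix; a minimal standalone sketch of that split, with made-up tensors and DETR's hidden size of 256, looks like this:

import torch

# fused in-projection as stored by torch.nn.MultiheadAttention: shape (3 * hidden, hidden)
hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)
in_proj_bias = torch.randn(3 * hidden)

# same slicing as the read_in_q_k_v logic above: query, key, value in that order
q_w, k_w, v_w = in_proj_weight[:hidden, :], in_proj_weight[hidden:2 * hidden, :], in_proj_weight[-hidden:, :]
q_b, k_b, v_b = in_proj_bias[:hidden], in_proj_bias[hidden:2 * hidden], in_proj_bias[-hidden:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)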
'''simple docstring''' import math def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float: if ( not isinstance(__magic_name__ , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * power_factor def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float: if ( not isinstance(__magic_name__ , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
88
1
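The two helpers in the power-factor snippet above both end up with the same obfuscated name in this dump; with descriptive names assumed for illustration, their intended usage is:

import math

def real_power(apparent_power: float, power_factor: float) -> float:
    # P = S * cos(phi); the power factor is cos(phi)
    return apparent_power * power_factor

def reactive_power(apparent_power: float, power_factor: float) -> float:
    # Q = S * sin(phi) = S * sqrt(1 - cos(phi)^2)
    return apparent_power * math.sqrt(1 - power_factor**2)

print(real_power(100, 0.9))      # 90.0
print(reactive_power(100, 0.9))  # ~43.59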
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> None: if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): lowercase , lowercase : List[Any] =array[indexa], array[indexa] def _lowerCAmelCase ( __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> None: if length > 1: lowercase : Dict =int(length / 2 ) for i in range(__magic_name__ , low + middle ): comp_and_swap(__magic_name__ , __magic_name__ , i + middle , __magic_name__ ) bitonic_merge(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) bitonic_merge(__magic_name__ , low + middle , __magic_name__ , __magic_name__ ) def _lowerCAmelCase ( __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> None: if length > 1: lowercase : Tuple =int(length / 2 ) bitonic_sort(__magic_name__ , __magic_name__ , __magic_name__ , 1 ) bitonic_sort(__magic_name__ , low + middle , __magic_name__ , 0 ) bitonic_merge(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = input("""Enter numbers separated by a comma:\n""").strip() UpperCamelCase_ = [int(item.strip()) for item in user_input.split(""",""")] bitonic_sort(unsorted, 0, len(unsorted), 1) print("""\nSorted array in ascending order is: """, end="""""") print(*unsorted, sep=""", """) bitonic_merge(unsorted, 0, len(unsorted), 0) print("""Sorted array in descending order is: """, end="""""") print(*unsorted, sep=""", """)
88
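Bitonic sort only works on inputs whose length is a power of two; a hedged usage sketch, assuming the helpers keep the conventional names referenced in the file's own __main__ block (bitonic_sort, with direction 1 meaning ascending):

# hypothetical call, names assumed from the __main__ block above
data = [12, 42, -21, 17, 23, 18, 9, -5]   # length 8, a power of two
bitonic_sort(data, 0, len(data), 1)        # sorts in place, ascending
print(data)                                # [-21, -5, 9, 12, 17, 18, 23, 42]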
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ): '''simple docstring''' warnings.warn( '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
88
1
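Since the class above only emits a deprecation warning and otherwise defers entirely to CLIPImageProcessor, the intended replacement is a direct swap; the checkpoint name below is illustrative:

from transformers import CLIPImageProcessor

# drop-in replacement for the deprecated CLIPFeatureExtractor
image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")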
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'ibert' def __init__( self : int , UpperCAmelCase__ : Any=30522 , UpperCAmelCase__ : Tuple=768 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : List[Any]=3072 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : str=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Union[str, Any]="absolute" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : str="none" , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Dict =vocab_size lowercase : int =hidden_size lowercase : str =num_hidden_layers lowercase : Any =num_attention_heads lowercase : List[str] =hidden_act lowercase : str =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Optional[int] =attention_probs_dropout_prob lowercase : List[str] =max_position_embeddings lowercase : List[str] =type_vocab_size lowercase : Dict =initializer_range lowercase : Union[str, Any] =layer_norm_eps lowercase : Any =position_embedding_type lowercase : List[Any] =quant_mode lowercase : int =force_dequant class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if self.task == "multiple-choice": lowercase : Dict ={0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase : List[Any] ={0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
88
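A minimal sketch of instantiating the configuration above; quant_mode and force_dequant are the I-BERT-specific knobs, everything else falls back to the defaults defined in __init__:

from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="none")
print(config.hidden_size, config.num_hidden_layers, config.quant_mode)  # 768 12 True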
'''simple docstring''' import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") UpperCamelCase_ = parser.parse_args() if args.model_type == "roberta": UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name) UpperCamelCase_ = """roberta""" elif args.model_type == "gpt2": UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name) UpperCamelCase_ = """transformer""" UpperCamelCase_ = model.state_dict() UpperCamelCase_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight''' UpperCamelCase_ = state_dict[param_name] for w in ["weight", "bias"]: UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}''' UpperCamelCase_ = state_dict[param_name] # Transformer Blocks # UpperCamelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: UpperCamelCase_ = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}'''] UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}'''] UpperCamelCase_ = state_dict["""lm_head.weight"""] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
88
1
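The extraction script above writes a plain state dict for initialising a distilled student; a hedged follow-up sketch that simply reloads and inspects that checkpoint (path taken from the script's own default flag, so it only exists after the script has run):

import torch

compressed_sd = torch.load("serialization_dir/tf_roberta_048131723.pth", map_location="cpu")
print(len(compressed_sd), "tensors extracted for the student")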
'''simple docstring''' # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict: for param in module.parameters(): lowercase : List[str] =False def _lowerCAmelCase ( ) -> List[str]: lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase : Optional[int] ='''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str: lowercase : Optional[int] =plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def _lowerCAmelCase ( ) -> List[Any]: lowercase : Any =datetime.now() lowercase : Dict =current_time.strftime('''%H:%M:%S''' ) return timestamp
88
1
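In the first helper above the assignment target inside the parameter loop was lost to the dump's renaming; the conventional freezing pattern it corresponds to (a hedged reconstruction, not the literal code) is:

import torch

def freeze_params(module: torch.nn.Module) -> None:
    for param in module.parameters():
        param.requires_grad = False  # exclude these tensors from backpropagation

freeze_params(torch.nn.Linear(4, 4))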
'''simple docstring''' import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() UpperCamelCase_ = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, 
TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Dict=False , __magic_name__ : Dict=True ) -> Dict: if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase , lowercase , lowercase , lowercase : List[str] =MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase : Any =cached_file(__magic_name__ , __magic_name__ , force_download=not use_cached_models ) lowercase : Optional[int] =config_class.from_json_file(__magic_name__ ) lowercase : Optional[Any] =True lowercase : str =True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase : Dict =model_class(__magic_name__ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase : List[str] =cached_file( __magic_name__ , __magic_name__ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase : Optional[Any] =load_pytorch_checkpoint_in_tfa_model(__magic_name__ , __magic_name__ ) if compare_with_pt_model: lowercase : int =tf_model(tf_model.dummy_inputs , training=__magic_name__ ) # build the network lowercase : int =torch.load(__magic_name__ , map_location='''cpu''' ) lowercase : Any =pt_model_class.from_pretrained( pretrained_model_name_or_path=__magic_name__ , config=__magic_name__ , state_dict=__magic_name__ ) with torch.no_grad(): lowercase : int =pt_model(**pt_model.dummy_inputs ) lowercase : int =pto[0].numpy() lowercase : int =tfo[0].numpy() lowercase : Dict =np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2E-2, f'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(__magic_name__ , save_format='''h5''' ) def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Any=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[str]=False , __magic_name__ : Union[str, Any]=False , __magic_name__ : Tuple=False , __magic_name__ : str=False , ) -> Tuple: if args_model_type is None: lowercase : Union[str, Any] =list(MODEL_CLASSES.keys() ) else: lowercase : Optional[Any] =[args_model_type] for j, model_type in enumerate(__magic_name__ , start=1 ): print('''=''' * 100 ) print(f''' Converting model type {j}/{len(__magic_name__ )}: {model_type}''' ) print('''=''' * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase , lowercase , lowercase , lowercase , lowercase : str =MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase : List[Any] 
=list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase : int =model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(__magic_name__ , __magic_name__ ) , start=1 ): print('''-''' * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase : List[str] =model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(__magic_name__ )}: {model_shortcut_name} - model_type {model_type}''' ) print('''-''' * 100 ) if config_shortcut_name in aws_config_map: lowercase : int =cached_file(__magic_name__ , __magic_name__ , force_download=not use_cached_models ) else: lowercase : str =config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase : Optional[Any] =cached_file(__magic_name__ , __magic_name__ , force_download=not use_cached_models ) else: lowercase : Optional[int] =model_shortcut_name if os.path.isfile(__magic_name__ ): lowercase : Tuple ='''converted_model''' convert_pt_checkpoint_to_tf( model_type=__magic_name__ , pytorch_checkpoint_path=__magic_name__ , config_file=__magic_name__ , tf_dump_path=os.path.join(__magic_name__ , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=__magic_name__ , ) if remove_cached_files: os.remove(__magic_name__ ) os.remove(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") UpperCamelCase_ = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
88
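The conversion script above validates each converted checkpoint by comparing the raw PyTorch and TensorFlow output arrays; the same check in isolation, with dummy arrays standing in for the two model outputs, is:

import numpy as np

np_pt = np.random.rand(1, 8, 32).astype(np.float32)
np_tf = np_pt + np.random.uniform(-1e-3, 1e-3, np_pt.shape).astype(np.float32)
diff = np.amax(np.abs(np_pt - np_tf))
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
print(f"Max absolute difference between models outputs {diff}")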
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _lowerCAmelCase ( ) -> List[Any]: lowercase : Tuple =HfArgumentParser(__magic_name__ ) lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0] lowercase : Any =TensorFlowBenchmark(args=__magic_name__ ) try: lowercase : List[Any] =parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.''' lowercase : Any =''' '''.join(str(__magic_name__ ).split(''' ''' )[:-1] ) lowercase : Optional[Any] ='''''' lowercase : List[str] =eval(str(__magic_name__ ).split(''' ''' )[-1] ) lowercase : Optional[Any] =[] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__magic_name__ ) if len(__magic_name__ ) > 0: lowercase : int =full_error_msg + begin_error_msg + str(__magic_name__ ) raise ValueError(__magic_name__ ) benchmark.run() if __name__ == "__main__": main()
88
1
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[int] , __magic_name__ : int ) -> list[int]: lowercase : Optional[Any] =0 lowercase : str =len(__magic_name__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: lowercase : Optional[Any] =i + 1 else: lowercase : Any =j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
88
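The two-pointer search above relies on nums being sorted in ascending order; assuming the helper keeps the two_pointer name used in its own __main__ block, a couple of illustrative calls:

print(two_pointer([2, 7, 11, 15], 9))    # [0, 1] because 2 + 7 == 9
print(two_pointer([2, 7, 11, 15], 100))  # [] when no pair reaches the target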
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool: lowercase : str =len(__magic_name__ ) # We need to create solution object to save path. lowercase : int =[[0 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )] lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , __magic_name__ ) if solved: print('''\n'''.join(str(__magic_name__ ) for row in solutions ) ) else: print('''No solution exists!''' ) return solved def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool: lowercase : Optional[int] =len(__magic_name__ ) # Final check point. if i == j == (size - 1): lowercase : Optional[int] =1 return True lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds lowercase : Tuple =(i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited lowercase : Union[str, Any] =1 # check for directions if ( run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ ) or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ ) or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ ) or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ ) ): return True lowercase : str =0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
88
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCamelCase_ = ViTImageProcessor if is_vision_available() else None @property def lowerCamelCase_ ( self : Any ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =(3, 32, 128) lowercase : Union[str, Any] =tempfile.mkdtemp() # fmt: off lowercase : int =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on lowercase : Dict =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' ) lowercase : Union[str, Any] ={ '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 32, '''width''': 128}, } lowercase : str =os.path.join(self.tmpdirname , UpperCAmelCase__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int , **UpperCAmelCase__ : Any ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] , **UpperCAmelCase__ : Tuple ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : List[Any] =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) lowercase : str =Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) return image_input def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] =self.get_tokenizer() lowercase : Any =self.get_image_processor() lowercase : str =MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) lowercase : List[str] =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase__ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[Any] 
=self.get_tokenizer() lowercase : List[Any] =self.get_image_processor() lowercase : List[Any] =MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) lowercase : Union[str, Any] =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowercase : Dict =self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) lowercase : Dict =MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : str =self.get_image_processor() lowercase : Tuple =self.get_tokenizer() lowercase : int =MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowercase : str =self.prepare_image_inputs() lowercase : Dict =image_processor(UpperCAmelCase__ , return_tensors='''np''' ) lowercase : Union[str, Any] =processor(images=UpperCAmelCase__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : List[str] =self.get_image_processor() lowercase : List[str] =self.get_tokenizer() lowercase : Tuple =MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowercase : Dict ='''test''' lowercase : str =processor(text=UpperCAmelCase__ ) lowercase : Dict =tokenizer(UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[int] =self.get_image_processor() lowercase : str =self.get_tokenizer() lowercase : Dict =MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowercase : List[str] ='''test''' lowercase : Optional[Any] =self.prepare_image_inputs() lowercase : Optional[int] =processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase__ ): processor() def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.get_image_processor() lowercase : str =self.get_tokenizer() lowercase : Optional[int] =MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowercase : int =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowercase : List[str] =processor.char_decode(UpperCAmelCase__ ) lowercase : Tuple =tokenizer.batch_decode(UpperCAmelCase__ ) lowercase : Dict =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : List[str] =self.get_image_processor() lowercase : Union[str, Any] =self.get_tokenizer() lowercase : Dict =MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowercase : str =None lowercase : Optional[Any] =self.prepare_image_inputs() lowercase : str 
=processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str =self.get_image_processor() lowercase : List[str] =self.get_tokenizer() lowercase : str =MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowercase : List[str] =torch.randn(1 , 27 , 38 ) lowercase : int =torch.randn(1 , 27 , 50257 ) lowercase : Optional[Any] =torch.randn(1 , 27 , 30522 ) lowercase : List[Any] =processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
88
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowercase : Any =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) @torch.no_grad() def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ): '''simple docstring''' # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ): lowercase : Optional[int] =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCAmelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowercase : Dict =self.scheduler.step( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 ) lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase__ )
88
1
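A hedged usage sketch for the pipeline above; the checkpoint name is illustrative (any unconditional DDPM/DDIM UNet checkpoint should work, since the constructor rebuilds the scheduler as DDIM via from_config):

import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
generator = torch.Generator().manual_seed(0)
image = pipe(batch_size=1, generator=generator, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")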
'''simple docstring''' import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml UpperCamelCase_ = NewType("""DataClass""", Any) UpperCamelCase_ = NewType("""DataClassType""", Any) def _lowerCAmelCase ( __magic_name__ : Dict ) -> str: if isinstance(__magic_name__ , __magic_name__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def _lowerCAmelCase ( __magic_name__ : list ) -> Callable[[str], Any]: lowercase : Optional[Any] ={str(__magic_name__ ): choice for choice in choices} return lambda __magic_name__ : str_to_choice.get(__magic_name__ , __magic_name__ ) def _lowerCAmelCase ( *, __magic_name__ : Union[str, List[str]] = None , __magic_name__ : str = None , __magic_name__ : Any = dataclasses.MISSING , __magic_name__ : Callable[[], Any] = dataclasses.MISSING , __magic_name__ : dict = None , **__magic_name__ : Optional[Any] , ) -> dataclasses.Field: if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls lowercase : Tuple ={} if aliases is not None: lowercase : Dict =aliases if help is not None: lowercase : Optional[Any] =help return dataclasses.field(metadata=__magic_name__ , default=__magic_name__ , default_factory=__magic_name__ , **__magic_name__ ) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 42 def __init__( self : Tuple , UpperCAmelCase__ : Union[DataClassType, Iterable[DataClassType]] , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' # To make the default appear when using --help if "formatter_class" not in kwargs: lowercase : Dict =ArgumentDefaultsHelpFormatter super().__init__(**UpperCAmelCase__ ) if dataclasses.is_dataclass(UpperCAmelCase__ ): lowercase : List[str] =[dataclass_types] lowercase : Any =list(UpperCAmelCase__ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(UpperCAmelCase__ ) @staticmethod def lowerCamelCase_ ( UpperCAmelCase__ : ArgumentParser , UpperCAmelCase__ : dataclasses.Field ): '''simple docstring''' lowercase : Optional[int] =F'''--{field.name}''' lowercase : Optional[int] =field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , UpperCAmelCase__ ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) lowercase : Dict =kwargs.pop('''aliases''' , [] ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Dict =[aliases] lowercase : Union[str, Any] =getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(UpperCAmelCase__ , '''UnionType''' ) and isinstance(UpperCAmelCase__ , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(UpperCAmelCase__ ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' F''' Problem encountered in field \'{field.name}\'.''' ) if type(UpperCAmelCase__ ) not in field.type.__args__: # filter `str` in Union lowercase : Optional[int] =field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] lowercase : Union[str, Any] =getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) lowercase : Dict =( field.type.__args__[0] if isinstance(UpperCAmelCase__ , field.type.__args__[1] ) else field.type.__args__[1] ) lowercase : Optional[int] =getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) lowercase : Union[str, Any] ={} if origin_type is Literal or (isinstance(field.type , UpperCAmelCase__ ) and issubclass(field.type , UpperCAmelCase__ )): if origin_type is Literal: lowercase : Tuple =field.type.__args__ else: lowercase : Optional[Any] =[x.value for x in field.type] lowercase : str =make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: lowercase : List[Any] =field.default else: lowercase : List[str] =True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument lowercase : Optional[Any] =copy(UpperCAmelCase__ ) # Hack because type=bool in argparse does not behave as we want. lowercase : str =string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
lowercase : List[str] =False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way lowercase : int =default # This tells argparse we accept 0 or 1 value after --field_name lowercase : List[str] ='''?''' # This is the value that will get picked if we do --field_name (without value) lowercase : str =True elif isclass(UpperCAmelCase__ ) and issubclass(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : int =field.type.__args__[0] lowercase : Optional[int] ='''+''' if field.default_factory is not dataclasses.MISSING: lowercase : List[str] =field.default_factory() elif field.default is dataclasses.MISSING: lowercase : Union[str, Any] =True else: lowercase : List[Any] =field.type if field.default is not dataclasses.MISSING: lowercase : Any =field.default elif field.default_factory is not dataclasses.MISSING: lowercase : Dict =field.default_factory() else: lowercase : List[str] =True parser.add_argument(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): lowercase : List[Any] =False parser.add_argument(F'''--no_{field.name}''' , action='''store_false''' , dest=field.name , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : DataClassType ): '''simple docstring''' if hasattr(UpperCAmelCase__ , '''_argument_group_name''' ): lowercase : List[Any] =self.add_argument_group(dtype._argument_group_name ) else: lowercase : str =self try: lowercase : Dict[str, type] =get_type_hints(UpperCAmelCase__ ) except NameError: raise RuntimeError( F'''Type resolution failed for {dtype}. Try declaring the class in global scope or ''' '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase__ ): lowercase : Dict ='''.'''.join(map(UpperCAmelCase__ , sys.version_info[:3] ) ) raise RuntimeError( F'''Type resolution failed for {dtype} on Python {python_version}. Try removing ''' '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(UpperCAmelCase__ ): if not field.init: continue lowercase : int =type_hints[field.name] self._parse_dataclass_field(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[Any]=None , ): '''simple docstring''' if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): lowercase : Optional[Any] =[] if args_filename: args_files.append(Path(UpperCAmelCase__ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values lowercase : int =ArgumentParser() args_file_parser.add_argument(UpperCAmelCase__ , type=UpperCAmelCase__ , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) lowercase , lowercase : Any =args_file_parser.parse_known_args(args=UpperCAmelCase__ ) lowercase : str =vars(UpperCAmelCase__ ).get(args_file_flag.lstrip('''-''' ) , UpperCAmelCase__ ) if cmd_args_file_paths: args_files.extend([Path(UpperCAmelCase__ ) for p in cmd_args_file_paths] ) lowercase : List[Any] =[] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last lowercase : Optional[Any] =file_args + args if args is not None else file_args + sys.argv[1:] lowercase , lowercase : Union[str, Any] =self.parse_known_args(args=UpperCAmelCase__ ) lowercase : Union[str, Any] =[] for dtype in self.dataclass_types: lowercase : int ={f.name for f in dataclasses.fields(UpperCAmelCase__ ) if f.init} lowercase : int ={k: v for k, v in vars(UpperCAmelCase__ ).items() if k in keys} for k in keys: delattr(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Union[str, Any] =dtype(**UpperCAmelCase__ ) outputs.append(UpperCAmelCase__ ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(UpperCAmelCase__ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' ) return (*outputs,) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Dict[str, Any] , UpperCAmelCase__ : bool = False ): '''simple docstring''' lowercase : List[str] =set(args.keys() ) lowercase : Dict =[] for dtype in self.dataclass_types: lowercase : int ={f.name for f in dataclasses.fields(UpperCAmelCase__ ) if f.init} lowercase : Dict ={k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) lowercase : str =dtype(**UpperCAmelCase__ ) outputs.append(UpperCAmelCase__ ) if not allow_extra_keys and unused_keys: raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase__ )}''' ) return tuple(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : bool = False ): '''simple docstring''' with open(Path(UpperCAmelCase__ ) , encoding='''utf-8''' ) as open_json_file: lowercase : str =json.loads(open_json_file.read() ) lowercase : Tuple =self.parse_dict(UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : bool = False ): '''simple docstring''' lowercase : Union[str, Any] =self.parse_dict(yaml.safe_load(Path(UpperCAmelCase__ ).read_text() ) , allow_extra_keys=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ )
88
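A minimal usage sketch for the parser defined above: arguments are declared as a dataclass and parsed into a typed instance (the field names here are made up for illustration):

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TrainingArgs:
    learning_rate: float = field(default=3e-5, metadata={"help": "Peak learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Enable mixed precision."})

parser = HfArgumentParser(TrainingArgs)
(args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--fp16"])
print(args.learning_rate, args.fp16)  # 0.0001 True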
'''simple docstring''' import argparse import copy def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]: lowercase : int ={} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: lowercase : List[str] =[] _list.append([line.split()[1], line.split()[2]] ) lowercase : Tuple =_list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: lowercase : List[Any] =[] _list.append([line.split()[0], line.split()[2]] ) lowercase : Union[str, Any] =_list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str: with open(__magic_name__ ) as f: lowercase : Optional[int] =f.read(1 ) lowercase : List[Any] =start_node lowercase : List[Any] =[] lowercase : str =start_node lowercase : str =0 while visiting not in first_solution: lowercase : Optional[int] =10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: lowercase : List[Any] =k[1] lowercase : str =k[0] first_solution.append(__magic_name__ ) lowercase : Any =distance_of_first_solution + int(__magic_name__ ) lowercase : Optional[int] =best_node first_solution.append(__magic_name__ ) lowercase : str =0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 lowercase : str =( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple: lowercase : Tuple =[] for n in solution[1:-1]: lowercase : Dict =solution.index(__magic_name__ ) for kn in solution[1:-1]: lowercase : Tuple =solution.index(__magic_name__ ) if n == kn: continue lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ ) lowercase : Optional[int] =kn lowercase : List[Any] =n lowercase : List[Any] =0 for k in _tmp[:-1]: lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: lowercase : Optional[int] =distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]: lowercase : str =1 lowercase : List[Any] =first_solution lowercase : Any =[] lowercase : str =distance_of_first_solution lowercase : str =solution while count <= iters: lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ ) lowercase : Dict =0 lowercase : int =neighborhood[index_of_best_solution] lowercase : Optional[int] =len(__magic_name__ ) - 1 lowercase : List[Any] =False while not found: lowercase : List[Any] =0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: lowercase : List[str] =best_solution[i] lowercase : Dict =solution[i] break lowercase : Any =i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) lowercase : str =True 
lowercase : int =best_solution[:-1] lowercase : Any =neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: lowercase : Optional[int] =cost lowercase : str =solution else: lowercase : Optional[int] =index_of_best_solution + 1 lowercase : List[Any] =neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) lowercase : Optional[int] =count + 1 return best_solution_ever, best_cost def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple: lowercase : List[str] =generate_neighbours(args.File ) lowercase , lowercase : Optional[Any] =generate_first_solution( args.File , __magic_name__ ) lowercase , lowercase : int =tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
88
1
'''simple docstring''' import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict UpperCamelCase_ = namedtuple( """_TestCommandArgs""", [ """dataset""", """name""", """cache_dir""", """data_dir""", """all_configs""", """save_infos""", """ignore_verifications""", """force_redownload""", """clear_cache""", ], defaults=[None, None, None, False, False, False, False, False], ) def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Optional[int] ) -> Tuple: return (abs(source - target ) / target) < 0.0_1 @pytest.mark.integration def _lowerCAmelCase ( __magic_name__ : List[str] ) -> int: lowercase : str =_TestCommandArgs(dataset=__magic_name__ , all_configs=__magic_name__ , save_infos=__magic_name__ ) lowercase : Any =TestCommand(*__magic_name__ ) test_command.run() lowercase : Any =os.path.join(__magic_name__ , '''README.md''' ) assert os.path.exists(__magic_name__ ) lowercase : List[str] =DatasetInfosDict.from_directory(__magic_name__ ) lowercase : Tuple =DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) , splits=[ { '''name''': '''train''', '''num_bytes''': 2351563, '''num_examples''': 10000, }, { '''name''': '''validation''', '''num_bytes''': 238418, '''num_examples''': 1000, }, ] , download_size=3940680 , dataset_size=2589981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowercase , lowercase : int =getattr(dataset_infos['''default'''] , __magic_name__ ), getattr(expected_dataset_infos['''default'''] , __magic_name__ ) if key == "num_bytes": assert is_apercent_close(__magic_name__ , __magic_name__ ) elif key == "splits": assert list(__magic_name__ ) == list(__magic_name__ ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
88
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : int = 1000000 ) -> int: lowercase : Dict =set(range(3 , __magic_name__ , 2 ) ) primes.add(2 ) for p in range(3 , __magic_name__ , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , __magic_name__ , __magic_name__ ) ) ) lowercase : List[Any] =[float(__magic_name__ ) for n in range(limit + 1 )] for p in primes: for n in range(__magic_name__ , limit + 1 , __magic_name__ ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(f'''{solution() = }''')
88
1
'''simple docstring''' import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> Optional[int]: # Initialise PyTorch model lowercase : Union[str, Any] =LxmertConfig.from_json_file(__magic_name__ ) print(f'''Building PyTorch model from configuration: {config}''' ) lowercase : List[Any] =LxmertForPreTraining(__magic_name__ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(__magic_name__ , __magic_name__ , __magic_name__ ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCamelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
88
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BioGptTokenizer lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase : List[str] =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Dict ='''lower newer''' lowercase : str ='''lower newer''' return input_text, output_text def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file ) lowercase : Any ='''lower''' lowercase : int =['''low''', '''er</w>'''] lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[int] =tokens + ['''<unk>'''] lowercase : Any =[14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ ) lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ ) lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
88
1
'''simple docstring''' import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 42 lowerCamelCase_ = jnp.floataa lowerCamelCase_ = True def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' super().setup() lowercase : Tuple =nn.Dense(5 , dtype=self.dtype ) def __call__( self : Optional[Any] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : Optional[int] =super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Tuple ) -> List[str]: def cross_entropy(__magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Dict=None ): lowercase : Tuple =logits.shape[-1] lowercase : Optional[Any] =(labels[..., None] == jnp.arange(__magic_name__ )[None]).astype('''f4''' ) lowercase : Any =jax.nn.log_softmax(__magic_name__ , axis=-1 ) lowercase : Union[str, Any] =-jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowercase : List[Any] =reduction(__magic_name__ ) return loss lowercase : Optional[int] =partial(__magic_name__ , reduction=jnp.mean ) lowercase : Union[str, Any] =cross_entropy(__magic_name__ , __magic_name__ ) lowercase : List[Any] =cross_entropy(__magic_name__ , __magic_name__ ) lowercase : Tuple =cross_entropy(__magic_name__ , __magic_name__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = "google/bigbird-roberta-base" lowerCamelCase_ = 30_00 lowerCamelCase_ = 1_05_00 lowerCamelCase_ = 1_28 lowerCamelCase_ = 3 lowerCamelCase_ = 1 lowerCamelCase_ = 5 # tx_args lowerCamelCase_ = 3E-5 lowerCamelCase_ = 0.0 lowerCamelCase_ = 2_00_00 lowerCamelCase_ = 0.0_0_9_5 lowerCamelCase_ = "bigbird-roberta-natural-questions" lowerCamelCase_ = "training-expt" lowerCamelCase_ = "data/nq-training.jsonl" lowerCamelCase_ = "data/nq-validation.jsonl" def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' os.makedirs(self.base_dir , exist_ok=UpperCAmelCase__ ) lowercase : Tuple =os.path.join(self.base_dir , self.save_dir ) lowercase : List[str] =self.batch_size_per_device * jax.device_count() @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 42 lowerCamelCase_ = 40_96 # no dynamic padding on TPUs def __call__( self : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =self.collate_fn(UpperCAmelCase__ ) lowercase : Tuple =jax.tree_util.tree_map(UpperCAmelCase__ , UpperCAmelCase__ ) return batch def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase , lowercase : str =self.fetch_inputs(features['''input_ids'''] ) lowercase : 
Optional[int] ={ '''input_ids''': jnp.array(UpperCAmelCase__ , dtype=jnp.intaa ), '''attention_mask''': jnp.array(UpperCAmelCase__ , dtype=jnp.intaa ), '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ), '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ), '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ), } return batch def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : list ): '''simple docstring''' lowercase : int =[self._fetch_inputs(UpperCAmelCase__ ) for ids in input_ids] return zip(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : list ): '''simple docstring''' lowercase : Tuple =[1 for _ in range(len(UpperCAmelCase__ ) )] while len(UpperCAmelCase__ ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str=None ) -> List[str]: if seed is not None: lowercase : Optional[int] =dataset.shuffle(seed=__magic_name__ ) for i in range(len(__magic_name__ ) // batch_size ): lowercase : str =dataset[i * batch_size : (i + 1) * batch_size] yield dict(__magic_name__ ) @partial(jax.pmap , axis_name='''batch''' ) def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : int , **__magic_name__ : str ) -> Optional[int]: def loss_fn(__magic_name__ : str ): lowercase : str =model_inputs.pop('''start_labels''' ) lowercase : Any =model_inputs.pop('''end_labels''' ) lowercase : Dict =model_inputs.pop('''pooled_labels''' ) lowercase : Dict =state.apply_fn(**__magic_name__ , params=__magic_name__ , dropout_rng=__magic_name__ , train=__magic_name__ ) lowercase , lowercase , lowercase : List[Any] =outputs return state.loss_fn( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) lowercase , lowercase : List[Any] =jax.random.split(__magic_name__ ) lowercase : Union[str, Any] =jax.value_and_grad(__magic_name__ ) lowercase , lowercase : Tuple =grad_fn(state.params ) lowercase : str =jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) lowercase : List[Any] =jax.lax.pmean(__magic_name__ , '''batch''' ) lowercase : str =state.apply_gradients(grads=__magic_name__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='''batch''' ) def _lowerCAmelCase ( __magic_name__ : str , **__magic_name__ : Optional[Any] ) -> int: lowercase : int =model_inputs.pop('''start_labels''' ) lowercase : Union[str, Any] =model_inputs.pop('''end_labels''' ) lowercase : int =model_inputs.pop('''pooled_labels''' ) lowercase : Dict =state.apply_fn(**__magic_name__ , params=state.params , train=__magic_name__ ) lowercase , lowercase , lowercase : Any =outputs lowercase : Optional[Any] =state.loss_fn(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) lowercase : Optional[Any] =jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) return metrics class __SCREAMING_SNAKE_CASE ( train_state.TrainState ): lowerCamelCase_ = struct.field(pytree_node=lowercase__ ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = None def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=None ): '''simple docstring''' lowercase : Union[str, Any] 
=model.params lowercase : Optional[int] =TrainState.create( apply_fn=model.__call__ , params=UpperCAmelCase__ , tx=UpperCAmelCase__ , loss_fn=UpperCAmelCase__ , ) if ckpt_dir is not None: lowercase , lowercase , lowercase , lowercase , lowercase : List[str] =restore_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[Any] ={ '''lr''': args.lr, '''init_lr''': args.init_lr, '''warmup_steps''': args.warmup_steps, '''num_train_steps''': num_train_steps, '''weight_decay''': args.weight_decay, } lowercase , lowercase : Optional[int] =build_tx(**UpperCAmelCase__ ) lowercase : str =train_state.TrainState( step=UpperCAmelCase__ , apply_fn=model.__call__ , params=UpperCAmelCase__ , tx=UpperCAmelCase__ , opt_state=UpperCAmelCase__ , ) lowercase : int =args lowercase : Dict =data_collator lowercase : Optional[int] =lr lowercase : int =params lowercase : Optional[Any] =jax_utils.replicate(UpperCAmelCase__ ) return state def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' lowercase : List[Any] =self.args lowercase : Dict =len(UpperCAmelCase__ ) // args.batch_size lowercase : str =jax.random.PRNGKey(0 ) lowercase : int =jax.random.split(UpperCAmelCase__ , jax.device_count() ) for epoch in range(args.max_epochs ): lowercase : Any =jnp.array(0 , dtype=jnp.floataa ) lowercase : Tuple =get_batched_dataset(UpperCAmelCase__ , args.batch_size , seed=UpperCAmelCase__ ) lowercase : Optional[Any] =0 for batch in tqdm(UpperCAmelCase__ , total=UpperCAmelCase__ , desc=F'''Running EPOCH-{epoch}''' ): lowercase : Union[str, Any] =self.data_collator(UpperCAmelCase__ ) lowercase , lowercase , lowercase : List[str] =self.train_step_fn(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 if i % args.logging_steps == 0: lowercase : Optional[int] =jax_utils.unreplicate(state.step ) lowercase : str =running_loss.item() / i lowercase : Optional[Any] =self.scheduler_fn(state_step - 1 ) lowercase : List[str] =self.evaluate(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Any ={ '''step''': state_step.item(), '''eval_loss''': eval_loss.item(), '''tr_loss''': tr_loss, '''lr''': lr.item(), } tqdm.write(str(UpperCAmelCase__ ) ) self.logger.log(UpperCAmelCase__ , commit=UpperCAmelCase__ ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' lowercase : Dict =get_batched_dataset(UpperCAmelCase__ , self.args.batch_size ) lowercase : int =len(UpperCAmelCase__ ) // self.args.batch_size lowercase : str =jnp.array(0 , dtype=jnp.floataa ) lowercase : Dict =0 for batch in tqdm(UpperCAmelCase__ , total=UpperCAmelCase__ , desc='''Evaluating ... ''' ): lowercase : Dict =self.data_collator(UpperCAmelCase__ ) lowercase : List[Any] =self.val_step_fn(UpperCAmelCase__ , **UpperCAmelCase__ ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 return running_loss / i def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : Tuple =jax_utils.unreplicate(UpperCAmelCase__ ) print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=''' ... 
''' ) self.model_save_fn(UpperCAmelCase__ , params=state.params ) with open(os.path.join(UpperCAmelCase__ , '''opt_state.msgpack''' ) , '''wb''' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(UpperCAmelCase__ , '''args.joblib''' ) ) joblib.dump(self.data_collator , os.path.join(UpperCAmelCase__ , '''data_collator.joblib''' ) ) with open(os.path.join(UpperCAmelCase__ , '''training_state.json''' ) , '''w''' ) as f: json.dump({'''step''': state.step.item()} , UpperCAmelCase__ ) print('''DONE''' ) def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Optional[Any]: print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... ''' ) with open(os.path.join(__magic_name__ , '''flax_model.msgpack''' ) , '''rb''' ) as f: lowercase : int =from_bytes(state.params , f.read() ) with open(os.path.join(__magic_name__ , '''opt_state.msgpack''' ) , '''rb''' ) as f: lowercase : Union[str, Any] =from_bytes(state.opt_state , f.read() ) lowercase : List[str] =joblib.load(os.path.join(__magic_name__ , '''args.joblib''' ) ) lowercase : Any =joblib.load(os.path.join(__magic_name__ , '''data_collator.joblib''' ) ) with open(os.path.join(__magic_name__ , '''training_state.json''' ) , '''r''' ) as f: lowercase : Any =json.load(__magic_name__ ) lowercase : Dict =training_state['''step'''] print('''DONE''' ) return params, opt_state, step, args, data_collator def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str ) -> str: lowercase : str =num_train_steps - warmup_steps lowercase : Dict =optax.linear_schedule(init_value=__magic_name__ , end_value=__magic_name__ , transition_steps=__magic_name__ ) lowercase : Tuple =optax.linear_schedule(init_value=__magic_name__ , end_value=1E-7 , transition_steps=__magic_name__ ) lowercase : List[str] =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[Any]: def weight_decay_mask(__magic_name__ : Any ): lowercase : Tuple =traverse_util.flatten_dict(__magic_name__ ) lowercase : Dict ={k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()} return traverse_util.unflatten_dict(__magic_name__ ) lowercase : str =scheduler_fn(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) lowercase : str =optax.adamw(learning_rate=__magic_name__ , weight_decay=__magic_name__ , mask=__magic_name__ ) return tx, lr
88
'''simple docstring''' import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ): '''simple docstring''' lowercase : int =parent lowercase : List[str] =batch_size lowercase : str =seq_length lowercase : Optional[Any] =is_training lowercase : Union[str, Any] =use_attention_mask lowercase : Optional[Any] =use_token_type_ids lowercase : Tuple =use_labels lowercase : List[str] =vocab_size lowercase : List[str] =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Any =num_attention_heads lowercase : List[str] =intermediate_size lowercase : Optional[Any] =hidden_act lowercase : Dict =hidden_dropout_prob lowercase : List[Any] =attention_probs_dropout_prob lowercase : Optional[Any] =max_position_embeddings lowercase : Tuple =type_vocab_size lowercase : Optional[int] =type_sequence_label_size lowercase : Optional[Any] =initializer_range lowercase : Optional[int] =num_choices def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =None if self.use_attention_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Tuple =None if self.use_token_type_ids: lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : int =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : 
List[str] =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs lowercase : List[str] =True lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = True lowerCamelCase_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =FlaxRobertaModelTester(self ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ ) lowercase : List[Any] =model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase__ )
88
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = KandinskyVaaControlnetImgaImgPipeline lowerCamelCase_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] lowerCamelCase_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] lowerCamelCase_ = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] lowerCamelCase_ = False @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return 32 @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return 100 @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) lowercase : Union[str, Any] ={ '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowercase : Any =UNetaDConditionModel(**UpperCAmelCase__ ) return model @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) lowercase : str =VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Dict =self.dummy_unet lowercase : List[str] =self.dummy_movq lowercase : str ={ '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_00_85, '''beta_end''': 0.0_12, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', 
'''thresholding''': False, } lowercase : Union[str, Any] =DDIMScheduler(**UpperCAmelCase__ ) lowercase : Union[str, Any] ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any=0 ): '''simple docstring''' lowercase : str =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) lowercase : List[str] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCAmelCase__ ) # create init_image lowercase : Union[str, Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) lowercase : str =image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase : Union[str, Any] =Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((256, 256) ) # create hint lowercase : Dict =floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) if str(UpperCAmelCase__ ).startswith('''mps''' ): lowercase : List[str] =torch.manual_seed(UpperCAmelCase__ ) else: lowercase : Optional[int] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) lowercase : int ={ '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : List[Any] ='''cpu''' lowercase : Any =self.get_dummy_components() lowercase : List[Any] =self.pipeline_class(**UpperCAmelCase__ ) lowercase : Union[str, Any] =pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) lowercase : List[str] =pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) ) lowercase : List[str] =output.images lowercase : Any =pipe( **self.get_dummy_inputs(UpperCAmelCase__ ) , return_dict=UpperCAmelCase__ , )[0] lowercase : Union[str, Any] =image[0, -3:, -3:, -1] lowercase : str =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase : Tuple =np.array( [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Any =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) lowercase : Union[str, Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) lowercase : List[Any] =init_image.resize((512, 512) ) lowercase : List[Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' 
'''/kandinskyv22/hint_image_cat.png''' ) lowercase : List[str] =torch.from_numpy(np.array(UpperCAmelCase__ ) ).float() / 2_55.0 lowercase : Any =hint.permute(2 , 0 , 1 ).unsqueeze(0 ) lowercase : Tuple ='''A robot, 4k photo''' lowercase : Optional[int] =KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(UpperCAmelCase__ ) lowercase : str =KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) lowercase : Union[str, Any] =pipeline.to(UpperCAmelCase__ ) pipeline.set_progress_bar_config(disable=UpperCAmelCase__ ) lowercase : List[str] =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase , lowercase : Tuple =pipe_prior( UpperCAmelCase__ , image=UpperCAmelCase__ , strength=0.85 , generator=UpperCAmelCase__ , negative_prompt='''''' , ).to_tuple() lowercase : Optional[int] =pipeline( image=UpperCAmelCase__ , image_embeds=UpperCAmelCase__ , negative_image_embeds=UpperCAmelCase__ , hint=UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) lowercase : Any =output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(UpperCAmelCase__ , UpperCAmelCase__ )
88
'''simple docstring''' import mpmath # for roots of unity import numpy as np class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ): '''simple docstring''' # Input as list lowercase : Optional[int] =list(poly_a or [0] )[:] lowercase : Optional[Any] =list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() lowercase : Any =len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() lowercase : Dict =len(self.polyB ) # Add 0 to make lengths equal a power of 2 lowercase : int =int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product lowercase : Tuple =self.__multiply() def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(UpperCAmelCase__ ) <= 1: return dft[0] # lowercase : Any =self.c_max_length // 2 while next_ncol > 0: lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )] lowercase : Tuple =self.root**next_ncol # First half of next step lowercase : str =1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step lowercase : int =1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update lowercase : Dict =new_dft lowercase : Tuple =next_ncol // 2 return dft[0] def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.__dft('''A''' ) lowercase : Any =self.__dft('''B''' ) lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT lowercase : Optional[int] =2 while next_ncol <= self.c_max_length: lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )] lowercase : List[str] =self.root ** (next_ncol // 2) lowercase : Optional[int] =1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update lowercase : List[Any] =new_inverse_c next_ncol *= 2 # Unpack lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Any ): '''simple docstring''' lowercase : Any ='''A = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) lowercase : Tuple ='''B = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) lowercase : List[str] ='''A*B = ''' + ''' + '''.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.product 
) ) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
1
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : int ) -> list: lowercase : Tuple =word.split() def justify(__magic_name__ : list , __magic_name__ : int , __magic_name__ : int ) -> str: lowercase : Union[str, Any] =max_width - width lowercase : int =len(__magic_name__ ) if len(__magic_name__ ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: lowercase : str =words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] lowercase : Optional[int] =spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] lowercase : Optional[int] =( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(__magic_name__ ): num_spaces_between_words_list[i] += 1 lowercase : List[Any] =[] for i in range(__magic_name__ ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(__magic_name__ ) lowercase : str =[] lowercase : list[str] =[] lowercase : Tuple =0 for word in words: if width + len(__magic_name__ ) + len(__magic_name__ ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(__magic_name__ ) width += len(__magic_name__ ) else: # justify the line and add it to result answer.append(justify(__magic_name__ , __magic_name__ , __magic_name__ ) ) # reset new line and new width lowercase , lowercase : List[Any] =[word], len(__magic_name__ ) lowercase : List[Any] =max_width - width - len(__magic_name__ ) answer.append(''' '''.join(__magic_name__ ) + (remaining_spaces + 1) * ''' ''' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
88
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
88
1
'''simple docstring''' import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = """▁""" UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BigBirdTokenizer lowerCamelCase_ = BigBirdTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = True def lowerCamelCase_ ( self : Any ): '''simple docstring''' super().setUp() lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''<s>''' lowercase : int =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(UpperCAmelCase__ ) , 1004 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase : Optional[int] =self.get_tokenizer() lowercase : Any =self.get_rust_tokenizer() lowercase : int ='''I was born in 92000, and this is falsé.''' lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ ) lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[Any] =self.get_rust_tokenizer() lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) lowercase : Tuple =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , ) lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowercase : Any 
=tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def lowerCamelCase_ ( self : str ): '''simple docstring''' return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str ='''Hello World!''' lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : int =( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase : Dict =''' '''.join(UpperCAmelCase__ ) lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Dict =self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' ) lowercase : Dict =BigBirdModel(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' # fmt: off lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 
452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
88
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'vision-encoder-decoder' lowerCamelCase_ = True def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F'''A configuraton of type {self.model_type} cannot be instantiated because ''' F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) lowercase : Optional[Any] =kwargs.pop('''encoder''' ) lowercase : List[Any] =encoder_config.pop('''model_type''' ) lowercase : List[str] =kwargs.pop('''decoder''' ) lowercase : Dict =decoder_config.pop('''model_type''' ) lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : str =True @classmethod def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowercase : int =True lowercase : Optional[Any] =True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =copy.deepcopy(self.__dict__ ) lowercase : Union[str, Any] =self.encoder.to_dict() lowercase : Union[str, Any] =self.decoder.to_dict() lowercase : int =self.__class__.model_type return output class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = version.parse('1.11' ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return 1E-4 @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =OrderedDict() lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ): '''simple docstring''' import torch lowercase : Optional[Any] =OrderedDict() lowercase : List[Any] =super().generate_dummy_inputs( UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ ) lowercase , lowercase : Optional[int] 
=dummy_input['''input_ids'''].shape lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size) lowercase : List[str] =dummy_input.pop('''input_ids''' ) lowercase : Tuple =dummy_input.pop('''attention_mask''' ) lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ ) return common_inputs class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ): '''simple docstring''' lowercase : List[Any] =encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
88
1
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : Tuple=30 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : int=37 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : str=10 , UpperCAmelCase__ : Dict=0.02 , ): '''simple docstring''' lowercase : Optional[Any] =parent lowercase : Optional[int] =batch_size lowercase : List[str] =image_size lowercase : Dict =patch_size lowercase : int =num_channels lowercase : Any =is_training lowercase : Optional[Any] =use_labels lowercase : Any =hidden_size lowercase : int =num_hidden_layers lowercase : Optional[int] =num_attention_heads lowercase : Any =intermediate_size lowercase : Dict =hidden_act lowercase : List[str] =hidden_dropout_prob lowercase : str =attention_probs_dropout_prob lowercase : Optional[int] =type_sequence_label_size lowercase : str =initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase : int =(image_size // patch_size) ** 2 lowercase : Dict =num_patches + 1 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Tuple =ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) return config, pixel_values def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[Any] =FlaxViTModel(config=UpperCAmelCase__ ) lowercase : Any =model(UpperCAmelCase__ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) lowercase : List[Any] =(self.image_size, self.image_size) lowercase : Tuple =(self.patch_size, self.patch_size) lowercase : str =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : Union[str, Any] =self.type_sequence_label_size lowercase : Dict =FlaxViTForImageClassification(config=UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images 
lowercase : Tuple =1 lowercase : Any =FlaxViTForImageClassification(UpperCAmelCase__ ) lowercase : Optional[int] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase : Optional[Any] =model(UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : str =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ) : Optional[Any] =config_and_inputs lowercase : List[Any] ={'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Optional[Any] =FlaxViTModelTester(self ) lowercase : List[str] =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase , lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict =model_class(UpperCAmelCase__ ) lowercase : Tuple =inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : Tuple =[*signature.parameters.keys()] lowercase : List[str] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase : Dict =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Union[str, Any] =model_class(UpperCAmelCase__ ) @jax.jit def model_jitted(UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Dict ): return model(pixel_values=UpperCAmelCase__ , **UpperCAmelCase__ ) with self.subTest('''JIT Enabled''' ): lowercase : Dict =model_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowercase : Tuple =model_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase : Union[str, Any] =model_class_name.from_pretrained('''google/vit-base-patch16-224''' ) lowercase : Tuple =model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(UpperCAmelCase__ )
88
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) UpperCamelCase_ = logging.getLogger(__name__) UpperCamelCase_ = tf.data.AUTOTUNE def _lowerCAmelCase ( ) -> Any: lowercase : Dict =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' ) parser.add_argument( '''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , ) parser.add_argument( '''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , ) parser.add_argument( '''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , ) parser.add_argument( '''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , ) parser.add_argument( '''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , ) parser.add_argument( '''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , ) parser.add_argument( '''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' ) parser.add_argument( '''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , ) parser.add_argument( '''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`''' ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , ) parser.add_argument( '''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , ) parser.add_argument( '''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`''' ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , ) parser.add_argument( '''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , ) parser.add_argument( '''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , ) parser.add_argument( '''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , ) parser.add_argument( '''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py''' , ) parser.add_argument( '''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , ) parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' ) parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' ) lowercase : Union[str, Any] =parser.parse_args() return args def _lowerCAmelCase ( __magic_name__ : List[str] ) -> List[Any]: try: if args.tpu_name: lowercase : Dict =tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: lowercase : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or ''' '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' ) tf.config.experimental_connect_to_cluster(__magic_name__ ) tf.tpu.experimental.initialize_tpu_system(__magic_name__ ) return tpu def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]: lowercase : str =0 for file in file_list: lowercase : List[str] =file.split('''/''' )[-1] lowercase : Union[str, Any] =re.search(R'''-\d+-(\d+)\.tfrecord''' , __magic_name__ ).group(1 ) lowercase : int =int(__magic_name__ ) num_samples += sample_count return num_samples def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> str: lowercase : int =count_samples(__magic_name__ ) lowercase : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__magic_name__ ) if shuffle: lowercase : Union[str, Any] =dataset.shuffle(len(__magic_name__ ) ) lowercase : Any =tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here lowercase : Optional[int] =dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) ) lowercase : str =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ ) if shuffle: assert shuffle_buffer_size is not None lowercase : int =dataset.shuffle(args.shuffle_buffer_size ) lowercase : Optional[int] =dataset.batch(__magic_name__ , drop_remainder=__magic_name__ ) lowercase : int =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ ) lowercase : Union[str, Any] =dataset.prefetch(__magic_name__ ) return dataset def _lowerCAmelCase ( __magic_name__ : Any ) -> str: if not args.no_tpu: lowercase : Optional[Any] =initialize_tpu(__magic_name__ ) lowercase : Any =tf.distribute.TPUStrategy(__magic_name__ ) else: lowercase : Optional[Any] =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' ) lowercase : Any =AutoTokenizer.from_pretrained(args.tokenizer ) lowercase : Union[str, Any] =AutoConfig.from_pretrained(args.pretrained_model_config ) lowercase : Optional[Any] =tokenizer.vocab_size lowercase : str =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) ) if not training_records: raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' ) lowercase : Optional[int] =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) ) if not eval_records: 
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' ) lowercase : Any =count_samples(__magic_name__ ) lowercase : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) lowercase : Union[str, Any] =steps_per_epoch * args.num_epochs with strategy.scope(): lowercase : List[Any] =TFAutoModelForMaskedLM.from_config(__magic_name__ ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built lowercase , lowercase : Dict =create_optimizer( num_train_steps=__magic_name__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__magic_name__ , metrics=['''accuracy'''] ) def decode_fn(__magic_name__ : Optional[Any] ): lowercase : Union[str, Any] ={ '''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(__magic_name__ , __magic_name__ ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. lowercase : str =DataCollatorForLanguageModeling( tokenizer=__magic_name__ , mlm_probability=args.mlm_probability , mlm=__magic_name__ , return_tensors='''tf''' ) def mask_with_collator(__magic_name__ : Dict ): # TF really needs an isin() function lowercase : int =( ~tf.cast(batch['''attention_mask'''] , tf.bool ) | (batch['''input_ids'''] == tokenizer.cls_token_id) | (batch['''input_ids'''] == tokenizer.sep_token_id) ) lowercase , lowercase : Union[str, Any] =data_collator.tf_mask_tokens( batch['''input_ids'''] , vocab_size=len(__magic_name__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__magic_name__ , ) return batch lowercase : List[str] =args.per_replica_batch_size * strategy.num_replicas_in_sync lowercase : Dict =prepare_dataset( __magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , shuffle_buffer_size=args.shuffle_buffer_size , ) lowercase : Union[str, Any] =prepare_dataset( __magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , ) lowercase : Tuple =[] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__magic_name__ ) ) model.fit( __magic_name__ , validation_data=__magic_name__ , epochs=args.num_epochs , callbacks=__magic_name__ , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": UpperCamelCase_ = parse_args() main(args)
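# Standalone sketch (not part of the training script above): the script infers the number of
# examples in each shard from the tfrecord filename, via the regex used in count_samples.
# A name such as "wikitext-0-2500.tfrecord" is read as a shard holding 2500 samples.
# The bucket path and shard names below are hypothetical.
import re


def count_samples_from_names(file_list):
    num_samples = 0
    for file in file_list:
        file_name = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", file_name).group(1)
        num_samples += int(sample_count)
    return num_samples


print(count_samples_from_names([
    "gs://my-bucket/wikitext-0-2500.tfrecord",
    "gs://my-bucket/wikitext-1-2500.tfrecord",
]))  # 5000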
88
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: UpperCamelCase_ = None UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase_ = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", }, """tokenizer_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""", }, } UpperCamelCase_ = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } UpperCamelCase_ = """▁""" # Segments (not really needed) UpperCamelCase_ = 0 UpperCamelCase_ = 1 UpperCamelCase_ = 2 UpperCamelCase_ = 3 UpperCamelCase_ = 4 class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = 'left' lowerCamelCase_ = XLNetTokenizer def __init__( self : Dict , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : Union[str, Any]="</s>" , UpperCAmelCase__ : Dict="<unk>" , UpperCAmelCase__ : Optional[Any]="<sep>" , UpperCAmelCase__ : Union[str, Any]="<pad>" , UpperCAmelCase__ : Optional[Any]="<cls>" , UpperCAmelCase__ : Optional[int]="<mask>" , UpperCAmelCase__ : Dict=["<eop>", "<eod>"] , **UpperCAmelCase__ : Union[str, Any] , ): '''simple docstring''' # Mask token behave like a normal word, i.e. 
include the space before it lowercase : List[Any] =AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token super().__init__( vocab_file=UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowercase : List[Any] =3 lowercase : Tuple =do_lower_case lowercase : Tuple =remove_space lowercase : Optional[Any] =keep_accents lowercase : Any =vocab_file lowercase : Optional[Any] =False if not self.vocab_file else True def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : Optional[Any] =[self.sep_token_id] lowercase : Union[str, Any] =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : str =[self.sep_token_id] lowercase : Any =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : List[Any] =os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ): copyfile(self.vocab_file , UpperCAmelCase__ ) return (out_vocab_file,)
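# Standalone sketch of the special-token layout produced by build_inputs_with_special_tokens
# and create_token_type_ids_from_sequences above. XLNet appends <sep> and <cls> at the end
# rather than prepending them; the integer ids below are placeholders, not real vocab ids.
sep, cls = [9], [10]
tokens_a, tokens_b = [1, 2, 3], [4, 5]

single = tokens_a + sep + cls                   # A <sep> <cls>
pair = tokens_a + sep + tokens_b + sep + cls    # A <sep> B <sep> <cls>

cls_segment_id = [2]
single_type_ids = len(tokens_a + sep) * [0] + cls_segment_id
pair_type_ids = len(tokens_a + sep) * [0] + len(tokens_b + sep) * [1] + cls_segment_id
print(single, pair, single_type_ids, pair_type_ids)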
88
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys

UpperCamelCase_ = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
88
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : List[Any]=False ) -> Any: lowercase : List[str] =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase : Union[str, Any] =[(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : str=False ) -> List[str]: for i in range(config.num_hidden_layers ): if base_model: lowercase : int ='''''' else: lowercase : Tuple ='''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase : Optional[int] =state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowercase : str =state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase : str =in_proj_weight[ : config.hidden_size, : ] lowercase : Union[str, Any] =in_proj_bias[: config.hidden_size] lowercase : Union[str, Any] =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase : Union[str, Any] =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase : 
str =in_proj_weight[ -config.hidden_size :, : ] lowercase : List[Any] =in_proj_bias[-config.hidden_size :] def _lowerCAmelCase ( __magic_name__ : Any ) -> int: lowercase : Any =['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[Any] =dct.pop(__magic_name__ ) lowercase : Optional[Any] =val def _lowerCAmelCase ( ) -> Dict: lowercase : List[Any] ='''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : List[str] =Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : str=True ) -> Dict: lowercase : Optional[int] =ViTConfig() # patch_size if model_name[-1] == "8": lowercase : Dict =8 # set labels if required if not base_model: lowercase : Union[str, Any] =1000 lowercase : str ='''huggingface/label-files''' lowercase : Tuple ='''imagenet-1k-id2label.json''' lowercase : Optional[int] =json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) ) lowercase : Tuple ={int(__magic_name__ ): v for k, v in idalabel.items()} lowercase : Optional[int] =idalabel lowercase : Union[str, Any] ={v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: lowercase : Dict =384 lowercase : Tuple =1536 lowercase : Tuple =12 lowercase : List[Any] =6 # load original model from torch hub lowercase : List[str] =torch.hub.load('''facebookresearch/dino:main''' , __magic_name__ ) original_model.eval() # load state_dict of original model, remove and rename some keys lowercase : Dict =original_model.state_dict() if base_model: remove_classification_head_(__magic_name__ ) lowercase : Union[str, Any] =create_rename_keys(__magic_name__ , base_model=__magic_name__ ) for src, dest in rename_keys: rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ ) # load HuggingFace model if base_model: lowercase : List[str] =ViTModel(__magic_name__ , add_pooling_layer=__magic_name__ ).eval() else: lowercase : Dict =ViTForImageClassification(__magic_name__ ).eval() model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by ViTImageProcessor lowercase : List[str] =ViTImageProcessor() lowercase : str =image_processor(images=prepare_img() , return_tensors='''pt''' ) lowercase : Optional[int] =encoding['''pixel_values'''] lowercase : List[Any] =model(__magic_name__ ) if base_model: lowercase : Dict =original_model(__magic_name__ ) assert torch.allclose(__magic_name__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: lowercase : str =original_model(__magic_name__ ) assert logits.shape == outputs.logits.shape assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__magic_name__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""dino_vitb16""", type=str, help="""Name of the model trained with DINO you'd like to convert.""", ) parser.add_argument( 
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--base_model""", action="""store_true""", help="""Whether to only convert the base model (no projection head weights).""", ) parser.set_defaults(base_model=True) UpperCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
88
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = """▁""" UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCamelCase_ = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCamelCase_ = { """facebook/xglm-564M""": 2048, } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = ['input_ids', 'attention_mask'] def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ): '''simple docstring''' lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase : Optional[Any] =7 lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase__ ) ) lowercase : List[Any] =vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase : Union[str, Any] =1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase : str =len(self.sp_model ) lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(UpperCAmelCase__ ) lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ): '''simple docstring''' lowercase : Optional[int] =self.__dict__.copy() lowercase : List[Any] =None lowercase : Tuple =self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : int =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : Optional[int] ={} lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase : List[Any] =[self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase__ )) return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ )) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : int =[self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ): '''simple docstring''' return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ 
, ''' ''' ).strip() return out_string def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : Dict =os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , '''wb''' ) as fi: lowercase : Optional[int] =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,)
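# Standalone sketch of the sequence layout built by build_inputs_with_special_tokens above:
# XGLM prepends </s> to a single sequence and joins pairs with a double </s>. The ids are
# placeholders, not real SentencePiece ids.
sep = [2]  # </s>
tokens_a, tokens_b = [10, 11], [20, 21, 22]

single = sep + tokens_a                        # </s> A
pair = sep + tokens_a + sep + sep + tokens_b   # </s> A </s> </s> B
print(single, pair)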
88
1
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowercase : Any =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) @torch.no_grad() def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ): '''simple docstring''' # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ): lowercase : Optional[int] =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCAmelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowercase : Dict =self.scheduler.step( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 ) lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase__ )
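# Illustrative usage sketch, assuming this class is exposed as diffusers.DDIMPipeline and
# that the unconditional DDPM CIFAR-10 checkpoint ("google/ddpm-cifar10-32") is available
# for download; eta=0.0 gives deterministic DDIM sampling for a fixed generator.
import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
generator = torch.Generator().manual_seed(0)
image = pipe(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")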
88
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _lowerCAmelCase ( __magic_name__ : str ) -> Union[str, Any]: lowercase : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase : List[str] =json.loads(open(__magic_name__ ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): lowercase : Tuple =args.output + '''.pt''' lowercase : int =OrderedDict() with tf.device('''/CPU:0''' ): lowercase : List[Any] =tf.train.load_checkpoint(args.tf_model_dir ) lowercase : int =reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase : Any =reader.get_tensor(__magic_name__ ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase : int =int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase : Union[str, Any] =8 lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/moe''' ): lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase : Union[str, Any] =key_name[-9:-7] for i in range(16 ): lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase : Any =( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/mlp''' ): lowercase : Dict =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Any =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p1/bias''' ): lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p2/kernel''' ): lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : int =torch.tensor(__magic_name__ ) elif key_name.endswith('''/p2/bias''' ): lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase : Optional[int] =vnp.copy() # same because it is one dimensional 
lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/ln''' ): lowercase : int =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player lowercase : Optional[int] =vnp.copy() # same because it is one dimensional lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/g''' ): lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player lowercase : Any =vnp.copy() # same because it is one dimensional lowercase : List[Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/att''' ): lowercase : int =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase : Dict =state[:, 0, :, :] lowercase : Tuple =state[:, 1, :, :] lowercase : List[Any] =state[:, 2, :, :] lowercase : Optional[int] =( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[int] =( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase : Dict =torch.tensor(__magic_name__ ) lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase : Optional[Any] =torch.tensor(__magic_name__ ) lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase : Tuple =torch.tensor(__magic_name__ ) elif key_name.endswith('''/o/kernel''' ): lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase : List[Any] =( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase : str =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/an''' ): lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional lowercase : List[str] =torch.tensor(__magic_name__ ) elif key_name.endswith('''/g''' ): lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player lowercase : Any =vnp.copy() # same because it is one dimensional lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase : Optional[Any] ='''model.%s.weight''' % nlayer lowercase : Optional[int] =vnp.copy() # same in embedded lowercase : List[Any] =torch.tensor(__magic_name__ ) if key_name.startswith('''model/wte''' ): lowercase : Tuple ='''lm_head.weight''' lowercase : str =vnp.copy() # same in embedded lowercase : Union[str, Any] =torch.tensor(__magic_name__ ) elif key_name.startswith('''model/wob''' ): lowercase : List[str] ='''final_logits_bias''' lowercase : Dict 
=vnp.copy() # same in embedded lowercase : Tuple =state.reshape((1, -1) ) lowercase : Dict =torch.tensor(__magic_name__ ) elif key_name == "model/dense/kernel": lowercase : Dict ='''model.last_project.weight''' lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase : Optional[Any] =torch.tensor(__magic_name__ ) elif key_name == "model/dense_1/bias": lowercase : List[Any] ='''model.last_project.bias''' lowercase : str =vnp.copy() # same because it is one dimensional lowercase : Optional[Any] =torch.tensor(__magic_name__ ) torch.save(__magic_name__ , args.output ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser( description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""") parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""") UpperCamelCase_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
88
1
'''simple docstring''' import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BartphoTokenizer lowerCamelCase_ = False lowerCamelCase_ = True def lowerCamelCase_ ( self : Any ): '''simple docstring''' super().setUp() lowercase : List[Any] =['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] lowercase : int =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : str ={'''unk_token''': '''<unk>'''} lowercase : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(F'''{token} {vocab_tokens[token]}\n''' ) lowercase : int =BartphoTokenizer(UpperCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Optional[Any] , **UpperCAmelCase__ : Tuple ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] ='''This is a là test''' lowercase : Optional[Any] ='''This is a<unk><unk> test''' return input_text, output_text def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple =BartphoTokenizer(UpperCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map ) lowercase : int ='''This is a là test''' lowercase : Optional[Any] ='''▁This ▁is ▁a ▁l à ▁t est'''.split() lowercase : Optional[int] =tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Any =tokens + [tokenizer.unk_token] lowercase : List[str] =[4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
88
'''simple docstring''' import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = """▁""" UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BigBirdTokenizer lowerCamelCase_ = BigBirdTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = True def lowerCamelCase_ ( self : Any ): '''simple docstring''' super().setUp() lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''<s>''' lowercase : int =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(UpperCAmelCase__ ) , 1004 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase : Optional[int] =self.get_tokenizer() lowercase : Any =self.get_rust_tokenizer() lowercase : int ='''I was born in 92000, and this is falsé.''' lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ ) lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[Any] =self.get_rust_tokenizer() lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ ) lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) lowercase : Tuple =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , ) lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowercase : Any 
=tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def lowerCamelCase_ ( self : str ): '''simple docstring''' return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str ='''Hello World!''' lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : int =( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase : Dict =''' '''.join(UpperCAmelCase__ ) lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Dict =self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' ) lowercase : Dict =BigBirdModel(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' # fmt: off lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 
452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
88
1
'''simple docstring''' import torch from transformers import AutoModel class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): def __init__( self : int , UpperCAmelCase__ : Tuple="sayef/fsner-bert-base-uncased" ): '''simple docstring''' super(UpperCAmelCase__ , self ).__init__() lowercase : str =AutoModel.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) lowercase : Dict =torch.nn.CosineSimilarity(3 , 1E-08 ) lowercase : Optional[int] =torch.nn.Softmax(dim=1 ) def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Dict ): '''simple docstring''' return self.bert(**UpperCAmelCase__ ).last_hidden_state def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] ): '''simple docstring''' return token_embeddings.sum(2 , keepdim=UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any]=1 ): '''simple docstring''' return self.softmax(T * self.cos(UpperCAmelCase__ , UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : Union[str, Any] =W_supports['''sizes'''].tolist() lowercase : Optional[Any] =W_supports['''start_token_id'''].item() lowercase : int =W_supports['''end_token_id'''].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] lowercase : Tuple =self.BERT(**UpperCAmelCase__ ) lowercase : Dict =self.BERT(**UpperCAmelCase__ ) lowercase : Optional[Any] =None lowercase : str =None lowercase : int =W_supports['''input_ids'''] == start_token_id lowercase : int =W_supports['''input_ids'''] == end_token_id for i, size in enumerate(UpperCAmelCase__ ): if i == 0: lowercase : Tuple =0 else: lowercase : int =support_sizes[i - 1] lowercase : List[Any] =S[s : s + size][start_token_masks[s : s + size]] lowercase : Tuple =S[s : s + size][end_token_masks[s : s + size]] lowercase : int =torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) lowercase : Tuple =torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: lowercase : Tuple =torch.vstack((p_starts, p_start) ) lowercase : Union[str, Any] =torch.vstack((p_ends, p_end) ) else: lowercase : Optional[Any] =p_start lowercase : Optional[Any] =p_end return p_starts, p_ends
88
def binomial_coefficient(n: int, r: int) -> int:
    # Compute C(n, r) with a single row of Pascal's triangle.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
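# Quick verification sketch: the Pascal's-triangle routine above should agree with the
# standard library, C(10, 5) = 252.
from math import comb

print(comb(10, 5))  # 252, matching the value printed by binomial_coefficient above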
88
1
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : str ) -> str: global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: lowercase : Tuple =mf_knapsack(i - 1 , __magic_name__ , __magic_name__ , __magic_name__ ) else: lowercase : Optional[Any] =max( mf_knapsack(i - 1 , __magic_name__ , __magic_name__ , __magic_name__ ) , mf_knapsack(i - 1 , __magic_name__ , __magic_name__ , j - wt[i - 1] ) + val[i - 1] , ) lowercase : Union[str, Any] =val return f[i][j] def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict ) -> int: lowercase : str =[[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: lowercase : Dict =max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: lowercase : Optional[int] =dp[i - 1][w_] return dp[n][w_], dp def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : list , __magic_name__ : list ) -> List[str]: if not (isinstance(__magic_name__ , (list, tuple) ) and isinstance(__magic_name__ , (list, tuple) )): raise ValueError( '''Both the weights and values vectors must be either lists or tuples''' ) lowercase : Any =len(__magic_name__ ) if num_items != len(__magic_name__ ): lowercase : Dict =( '''The number of weights must be the same as the number of values.\n''' f'''But got {num_items} weights and {len(__magic_name__ )} values''' ) raise ValueError(__magic_name__ ) for i in range(__magic_name__ ): if not isinstance(wt[i] , __magic_name__ ): lowercase : str =( '''All weights must be integers but got weight of ''' f'''type {type(wt[i] )} at index {i}''' ) raise TypeError(__magic_name__ ) lowercase , lowercase : Tuple =knapsack(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) lowercase : set =set() _construct_solution(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) return optimal_val, example_optional_set def _lowerCAmelCase ( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int , __magic_name__ : int , __magic_name__ : set ) -> Optional[Any]: # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(__magic_name__ , __magic_name__ , i - 1 , __magic_name__ , __magic_name__ ) else: optimal_set.add(__magic_name__ ) _construct_solution(__magic_name__ , __magic_name__ , i - 1 , j - wt[i - 1] , __magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = [3, 2, 4, 4] UpperCamelCase_ = [4, 3, 2, 3] UpperCamelCase_ = 4 UpperCamelCase_ = 6 UpperCamelCase_ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] UpperCamelCase_ , UpperCamelCase_ = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 UpperCamelCase_ , UpperCamelCase_ = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
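# Standalone brute-force check (illustrative) of the example solved in the __main__ block
# above. Reading the two lists as values [3, 2, 4, 4] and weights [4, 3, 2, 3] with
# capacity 6 (the assignment consistent with the asserted optimum of 8 and optimal subset
# {3, 4}), an exhaustive search over all item subsets agrees.
from itertools import combinations

values, weights, capacity = [3, 2, 4, 4], [4, 3, 2, 3], 6
best = max(
    sum(values[i] for i in combo)
    for r in range(len(weights) + 1)
    for combo in combinations(range(len(weights)), r)
    if sum(weights[i] for i in combo) <= capacity
)
print(best)  # 8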
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
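# Added quick checks (not part of the original file) illustrating the behaviour
# of check_anagrams: the comparison is case-insensitive and ignores spaces.
assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("New York Times", "monkeys write") is True
assert check_anagrams("hello", "world") is False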
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[Any] =tempfile.mkdtemp() lowercase : Optional[Any] =BlipImageProcessor() lowercase : List[str] =GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) lowercase : Optional[int] =BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) lowercase : Tuple =InstructBlipProcessor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).tokenizer def lowerCamelCase_ ( self : List[str] , **UpperCAmelCase__ : Any ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def lowerCamelCase_ ( self : str , **UpperCAmelCase__ : List[Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).qformer_tokenizer def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : int =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase : Optional[Any] =[Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Tuple =InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) lowercase : Dict =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowercase : Tuple =self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) lowercase : Dict =InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) self.assertIsInstance(processor.qformer_tokenizer , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =self.get_image_processor() lowercase : Optional[int] =self.get_tokenizer() lowercase : Union[str, Any] =self.get_qformer_tokenizer() lowercase : int =InstructBlipProcessor( tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ , qformer_tokenizer=UpperCAmelCase__ ) lowercase : Optional[Any] =self.prepare_image_inputs() lowercase : Any =image_processor(UpperCAmelCase__ , return_tensors='''np''' ) lowercase : Any =processor(images=UpperCAmelCase__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): 
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[Any] =self.get_image_processor() lowercase : str =self.get_tokenizer() lowercase : Any =self.get_qformer_tokenizer() lowercase : int =InstructBlipProcessor( tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ , qformer_tokenizer=UpperCAmelCase__ ) lowercase : Any ='''lower newer''' lowercase : List[Any] =processor(text=UpperCAmelCase__ ) lowercase : int =tokenizer(UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) lowercase : Optional[Any] =qformer_tokenizer(UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[int] =self.get_image_processor() lowercase : Union[str, Any] =self.get_tokenizer() lowercase : int =self.get_qformer_tokenizer() lowercase : List[Any] =InstructBlipProcessor( tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ , qformer_tokenizer=UpperCAmelCase__ ) lowercase : int ='''lower newer''' lowercase : Optional[Any] =self.prepare_image_inputs() lowercase : Union[str, Any] =processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase__ ): processor() def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : int =self.get_image_processor() lowercase : int =self.get_tokenizer() lowercase : List[Any] =self.get_qformer_tokenizer() lowercase : List[str] =InstructBlipProcessor( tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ , qformer_tokenizer=UpperCAmelCase__ ) lowercase : Any =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase : List[Any] =processor.batch_decode(UpperCAmelCase__ ) lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Any =self.get_image_processor() lowercase : Any =self.get_tokenizer() lowercase : List[str] =self.get_qformer_tokenizer() lowercase : Union[str, Any] =InstructBlipProcessor( tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ , qformer_tokenizer=UpperCAmelCase__ ) lowercase : str ='''lower newer''' lowercase : List[Any] =self.prepare_image_inputs() lowercase : Optional[int] =processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = None lowerCamelCase_ = BloomTokenizerFast lowerCamelCase_ = BloomTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 'tokenizer_file' lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' super().setUp() lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : str =self.get_rust_tokenizer() lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids'''] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input lowercase : Tuple ='''This is a simple input''' lowercase : int =['''This is a simple input 1''', '''This is a simple input 2'''] lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''') lowercase : int =[ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) lowercase : Optional[int] =None # Hotfixing padding = None self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises( UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) # Pair input self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises(UpperCAmelCase__ , 
tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises( UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_rust_tokenizer() lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ ) lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data lowercase : int =list(sample_data.values() ) lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) ) lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=99 , UpperCAmelCase__ : Tuple=36 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=512 , UpperCAmelCase__ : Optional[int]=16 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[Any]=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=1000 , ): '''simple docstring''' lowercase : Optional[Any] =parent lowercase : Tuple =batch_size lowercase : Union[str, Any] =num_channels lowercase : str =image_size lowercase : str =patch_size lowercase : Any =text_seq_length lowercase : List[Any] =is_training lowercase : Optional[Any] =use_input_mask lowercase : Union[str, Any] =use_token_type_ids lowercase : int =use_labels lowercase : List[str] =vocab_size lowercase : Tuple =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Tuple =num_attention_heads lowercase : Dict =intermediate_size lowercase : int =hidden_act lowercase : int =hidden_dropout_prob lowercase : str =attention_probs_dropout_prob lowercase : Any =max_position_embeddings lowercase : Any =type_vocab_size lowercase : Union[str, Any] =type_sequence_label_size lowercase : Tuple =initializer_range lowercase : int =coordinate_size lowercase : List[Any] =shape_size lowercase : str =num_labels lowercase : Dict =num_choices lowercase : Optional[int] =scope lowercase : Dict =range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowercase : Any =text_seq_length lowercase : List[Any] =(image_size // patch_size) ** 2 + 1 lowercase : Optional[int] =self.text_seq_length + self.image_seq_length def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) lowercase : int 
=ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase : Dict =bbox[i, j, 3] lowercase : int =bbox[i, j, 1] lowercase : Dict =t if bbox[i, j, 2] < bbox[i, j, 0]: lowercase : Optional[int] =bbox[i, j, 2] lowercase : str =bbox[i, j, 0] lowercase : Union[str, Any] =t lowercase : Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Optional[int] =None if self.use_input_mask: lowercase : Optional[int] =random_attention_mask([self.batch_size, self.text_seq_length] ) lowercase : Dict =None if self.use_token_type_ids: lowercase : Dict =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) lowercase : Union[str, Any] =None lowercase : Union[str, Any] =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Optional[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) lowercase : Union[str, Any] =LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : Optional[int] =LayoutLMvaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # text + image lowercase : Union[str, Any] =model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__ ) lowercase : int =model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Dict =model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : List[Any] =model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only lowercase : List[Any] =model(UpperCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only lowercase : Optional[Any] =model(pixel_values=UpperCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] ): '''simple docstring''' lowercase : 
Optional[int] =self.num_labels lowercase : Tuple =LayoutLMvaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Union[str, Any] =model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : Optional[int] =self.num_labels lowercase : Dict =LayoutLMvaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Tuple =model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Optional[Any] =LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[str] =model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : List[str] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Optional[Any] =config_and_inputs lowercase : str ={ '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase_ = ( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ): '''simple docstring''' # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =LayoutLMvaModelTester(self ) lowercase : List[str] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict=False ): '''simple docstring''' lowercase : List[str] =copy.deepcopy(UpperCAmelCase__ ) if model_class in get_values(UpperCAmelCase__ ): lowercase : List[Any] ={ k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(UpperCAmelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCAmelCase__ ): lowercase : List[str] =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in get_values(UpperCAmelCase__ ): lowercase : Tuple =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) lowercase : Optional[int] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in [ *get_values(UpperCAmelCase__ ), ]: lowercase : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in [ *get_values(UpperCAmelCase__ ), ]: lowercase : Any =torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase__ , ) return inputs_dict def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Optional[Any] =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Any ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[str] =LayoutLMvaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowerCAmelCase ( ) -> List[str]: lowercase : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : 
Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(UpperCAmelCase__ ) lowercase : Optional[Any] =self.default_image_processor lowercase : Union[str, Any] =prepare_img() lowercase : Union[str, Any] =image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values.to(UpperCAmelCase__ ) lowercase : Any =torch.tensor([[1, 2]] ) lowercase : Union[str, Any] =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass lowercase : int =model( input_ids=input_ids.to(UpperCAmelCase__ ) , bbox=bbox.to(UpperCAmelCase__ ) , pixel_values=pixel_values.to(UpperCAmelCase__ ) , ) # verify the logits lowercase : Union[str, Any] =torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__ ) lowercase : str =torch.tensor( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    # Real (active) power: P = S * cos(phi), where power_factor = cos(phi).
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    # Reactive power: Q = S * sin(phi) = S * sqrt(1 - cos(phi)^2).
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
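# Added usage sketch (not part of the original module); the function names above
# are reconstructions, so treat this as illustrative. For a 100 VA load at a
# power factor of 0.8: P = 80 W and Q = 60 var.
print(real_power(100, 0.8))      # 80.0
print(reactive_power(100, 0.8))  # ~60.0 (subject to floating-point rounding)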
'''simple docstring''' from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Any , UpperCAmelCase__ : pyspark.sql.DataFrame , UpperCAmelCase__ : Optional[NamedSplit] = None , UpperCAmelCase__ : Optional[Features] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "arrow" , **UpperCAmelCase__ : Tuple , ): '''simple docstring''' super().__init__( split=UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowercase : Dict =load_from_cache_file lowercase : Dict =file_format lowercase : Union[str, Any] =Spark( df=UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , working_dir=UpperCAmelCase__ , **UpperCAmelCase__ , ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowercase : List[str] =None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase__ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    # Thin deprecation shim: forwards everything to CLIPImageProcessor.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring''' import argparse import copy def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]: lowercase : int ={} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: lowercase : List[str] =[] _list.append([line.split()[1], line.split()[2]] ) lowercase : Tuple =_list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: lowercase : List[Any] =[] _list.append([line.split()[0], line.split()[2]] ) lowercase : Union[str, Any] =_list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str: with open(__magic_name__ ) as f: lowercase : Optional[int] =f.read(1 ) lowercase : List[Any] =start_node lowercase : List[Any] =[] lowercase : str =start_node lowercase : str =0 while visiting not in first_solution: lowercase : Optional[int] =10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: lowercase : List[Any] =k[1] lowercase : str =k[0] first_solution.append(__magic_name__ ) lowercase : Any =distance_of_first_solution + int(__magic_name__ ) lowercase : Optional[int] =best_node first_solution.append(__magic_name__ ) lowercase : str =0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 lowercase : str =( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple: lowercase : Tuple =[] for n in solution[1:-1]: lowercase : Dict =solution.index(__magic_name__ ) for kn in solution[1:-1]: lowercase : Tuple =solution.index(__magic_name__ ) if n == kn: continue lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ ) lowercase : Optional[int] =kn lowercase : List[Any] =n lowercase : List[Any] =0 for k in _tmp[:-1]: lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: lowercase : Optional[int] =distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]: lowercase : str =1 lowercase : List[Any] =first_solution lowercase : Any =[] lowercase : str =distance_of_first_solution lowercase : str =solution while count <= iters: lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ ) lowercase : Dict =0 lowercase : int =neighborhood[index_of_best_solution] lowercase : Optional[int] =len(__magic_name__ ) - 1 lowercase : List[Any] =False while not found: lowercase : List[Any] =0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: lowercase : List[str] =best_solution[i] lowercase : Dict =solution[i] break lowercase : Any =i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) lowercase : str =True 
lowercase : int =best_solution[:-1] lowercase : Any =neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: lowercase : Optional[int] =cost lowercase : str =solution else: lowercase : Optional[int] =index_of_best_solution + 1 lowercase : List[Any] =neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) lowercase : Optional[int] =count + 1 return best_solution_ever, best_cost def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple: lowercase : List[str] =generate_neighbours(args.File ) lowercase , lowercase : Optional[Any] =generate_first_solution( args.File , __magic_name__ ) lowercase , lowercase : int =tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
'''simple docstring''' import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") UpperCamelCase_ = parser.parse_args() if args.model_type == "roberta": UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name) UpperCamelCase_ = """roberta""" elif args.model_type == "gpt2": UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name) UpperCamelCase_ = """transformer""" UpperCamelCase_ = model.state_dict() UpperCamelCase_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight''' UpperCamelCase_ = state_dict[param_name] for w in ["weight", "bias"]: UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}''' UpperCamelCase_ = state_dict[param_name] # Transformer Blocks # UpperCamelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: UpperCamelCase_ = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}'''] UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}'''] UpperCamelCase_ = state_dict["""lm_head.weight"""] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    # Levenshtein distance via memoized top-down recursion.
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word index overflows - delete all remaining from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word index overflows - delete all remaining from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
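# Added example (not part of the original file); the top-level function name is a
# reconstruction. "intention" -> "execution" is the standard textbook case with
# edit distance 5.
print(min_distance_up_bottom("intention", "execution"))  # 5
print(min_distance_up_bottom("", "abc"))                 # 3 (three insertions)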
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    # Disable gradients for every parameter of the given module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    # Display a PIL image without axis ticks.
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
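# Added usage sketch (not part of the original file); the helper names above are
# reconstructions, so treat this as illustrative only.
import torch

tiny_model = torch.nn.Linear(4, 2)
freeze_module(tiny_model)
assert all(not p.requires_grad for p in tiny_model.parameters())
print(f"running on {get_device()} at {get_timestamp()}")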
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
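# Added usage sketch (not part of the original package file): building a small
# schema from the feature types exported above, assuming the public `datasets` API.
from datasets import ClassLabel, Features, Value

schema = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
print(schema)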
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase_ = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } UpperCamelCase_ = { """junnyu/roformer_chinese_small""": 1536, """junnyu/roformer_chinese_base""": 1536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } UpperCamelCase_ = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCamelCase_ = RoFormerTokenizer def __init__( self : Optional[int] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int="[UNK]" , UpperCAmelCase__ : Optional[int]="[SEP]" , UpperCAmelCase__ : Dict="[PAD]" , UpperCAmelCase__ : int="[CLS]" , UpperCAmelCase__ : Dict="[MASK]" , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Tuple=None , **UpperCAmelCase__ : Union[str, Any] , ): '''simple docstring''' super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowercase : Tuple =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('''lowercase''' , UpperCAmelCase__ ) != do_lower_case or pre_tok_state.get('''strip_accents''' , UpperCAmelCase__ ) != strip_accents ): lowercase : Optional[Any] =getattr(UpperCAmelCase__ , pre_tok_state.pop('''type''' ) ) lowercase : Dict =do_lower_case lowercase : Union[str, Any] =strip_accents lowercase : str 
=pre_tok_class(**UpperCAmelCase__ ) lowercase : int =do_lower_case def __getstate__( self : Optional[Any] ): '''simple docstring''' lowercase : List[Any] =self.__dict__.copy() lowercase : List[Any] =BertPreTokenizer() return state def __setstate__( self : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Tuple =d lowercase : str =self.__dict__['''_tokenizer'''].get_vocab() lowercase : List[str] =PreTokenizer.custom(JiebaPreTokenizer(UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any=None ): '''simple docstring''' lowercase : Optional[int] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase : Optional[int] =[self.sep_token_id] lowercase : Tuple =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ): '''simple docstring''' lowercase : Tuple =self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : int=False , **UpperCAmelCase__ : Any , ): '''simple docstring''' lowercase : Optional[int] =BertPreTokenizer() return super().save_pretrained(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
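# Added example (not part of the original file); assumes the entry-point name
# restored above. 0 marks an open cell and 1 a wall; the printed grid marks the
# discovered path with 1s.
demo_maze = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
solve_maze(demo_maze)  # prints a path from (0, 0) to (3, 3)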
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # Repeatedly swap two randomly chosen positions in place.
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
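# Added note (not part of the original file): the standard library's
# random.shuffle implements the classic in-place Fisher-Yates algorithm and is
# usually the better choice in practice.
import random

sample = list(range(10))
random.shuffle(sample)  # uniform in-place shuffle
print(sample)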
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowercase : Any =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) @torch.no_grad() def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ): '''simple docstring''' # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ): lowercase : Optional[int] =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCAmelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowercase : Dict =self.scheduler.step( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 ) lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase__ )
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCamelCase_ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCamelCase_ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCamelCase_ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[List[List[str]]] , UpperCAmelCase__ : List[List[str]] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=UpperCAmelCase__ , hypotheses=UpperCAmelCase__ , min_len=UpperCAmelCase__ , max_len=UpperCAmelCase__ ) }
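The metric above ultimately delegates to NLTK's corpus_gleu; a minimal sketch of calling that function directly with the second hypothesis/reference pair from the docstring examples (all names are local to the sketch):

from nltk.translate import gleu_score

hypotheses = [["he", "read", "the", "book", "because", "he",
               "was", "interested", "in", "world", "history"]]
references = [[["he", "was", "interested", "in", "world", "history",
                "because", "he", "read", "the", "book"]]]

score = gleu_score.corpus_gleu(
    list_of_references=references,
    hypotheses=hypotheses,
    min_len=1,   # smallest n-gram order, mirroring the metric's default
    max_len=4,   # largest n-gram order, mirroring the metric's default
)
print(round(score, 2))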
88
'''simple docstring''' import argparse import copy def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]: lowercase : int ={} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: lowercase : List[str] =[] _list.append([line.split()[1], line.split()[2]] ) lowercase : Tuple =_list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: lowercase : List[Any] =[] _list.append([line.split()[0], line.split()[2]] ) lowercase : Union[str, Any] =_list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str: with open(__magic_name__ ) as f: lowercase : Optional[int] =f.read(1 ) lowercase : List[Any] =start_node lowercase : List[Any] =[] lowercase : str =start_node lowercase : str =0 while visiting not in first_solution: lowercase : Optional[int] =10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: lowercase : List[Any] =k[1] lowercase : str =k[0] first_solution.append(__magic_name__ ) lowercase : Any =distance_of_first_solution + int(__magic_name__ ) lowercase : Optional[int] =best_node first_solution.append(__magic_name__ ) lowercase : str =0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 lowercase : str =( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple: lowercase : Tuple =[] for n in solution[1:-1]: lowercase : Dict =solution.index(__magic_name__ ) for kn in solution[1:-1]: lowercase : Tuple =solution.index(__magic_name__ ) if n == kn: continue lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ ) lowercase : Optional[int] =kn lowercase : List[Any] =n lowercase : List[Any] =0 for k in _tmp[:-1]: lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: lowercase : Optional[int] =distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]: lowercase : str =1 lowercase : List[Any] =first_solution lowercase : Any =[] lowercase : str =distance_of_first_solution lowercase : str =solution while count <= iters: lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ ) lowercase : Dict =0 lowercase : int =neighborhood[index_of_best_solution] lowercase : Optional[int] =len(__magic_name__ ) - 1 lowercase : List[Any] =False while not found: lowercase : List[Any] =0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: lowercase : List[str] =best_solution[i] lowercase : Dict =solution[i] break lowercase : Any =i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) lowercase : str =True 
lowercase : int =best_solution[:-1] lowercase : Any =neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: lowercase : Optional[int] =cost lowercase : str =solution else: lowercase : Optional[int] =index_of_best_solution + 1 lowercase : List[Any] =neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) lowercase : Optional[int] =count + 1 return best_solution_ever, best_cost def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple: lowercase : List[str] =generate_neighbours(args.File ) lowercase , lowercase : Optional[Any] =generate_first_solution( args.File , __magic_name__ ) lowercase , lowercase : int =tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
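As a rough illustration of the swap neighborhood the tabu search explores, here is a self-contained sketch (the toy distance table and helper names are invented for the example) that swaps pairs of interior stops in a tour and ranks the candidates by cost:

from itertools import combinations

distances = {
    ("a", "b"): 20, ("b", "c"): 30, ("c", "d"): 12,
    ("d", "a"): 35, ("a", "c"): 42, ("b", "d"): 34,
}

def tour_cost(tour):
    # Symmetric lookup: fall back to the reversed pair if the edge is stored the other way round.
    return sum(
        distances.get((u, v), distances.get((v, u)))
        for u, v in zip(tour, tour[1:])
    )

def swap_neighborhood(tour):
    # Swap every pair of interior stops (start and end stay fixed) and sort the results by cost.
    neighbors = []
    for i, j in combinations(range(1, len(tour) - 1), 2):
        candidate = list(tour)
        candidate[i], candidate[j] = candidate[j], candidate[i]
        neighbors.append((candidate, tour_cost(candidate)))
    return sorted(neighbors, key=lambda item: item[1])

start = ["a", "b", "c", "d", "a"]
print(tour_cost(start), swap_neighborhood(start)[0])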
88
1
"""MarkupLM model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
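A small usage sketch of the configuration above, assuming it is exposed as MarkupLMConfig; the override values are arbitrary:

config = MarkupLMConfig(hidden_size=384, num_hidden_layers=6, max_depth=30)
restored = MarkupLMConfig.from_dict(config.to_dict())  # round-trip through a plain dict
assert restored.hidden_size == 384 and restored.max_depth == 30
print(config.model_type)  # "markuplm"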
88
"""Sum Euler's totient function phi(n) over 2 <= n <= limit using a prime sieve."""


def solution(limit: int = 1000000) -> int:
    # Sieve of Eratosthenes restricted to the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi[n] starts at n and is scaled by (1 - 1/p) for every prime p dividing n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
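A quick cross-check of the sieve-scaled totients against a brute-force gcd count on a small limit (the helper name is local to this sketch):

from math import gcd

def phi_bruteforce(n: int) -> int:
    # Count the integers in [1, n] that are coprime to n.
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

limit = 50
expected = sum(phi_bruteforce(n) for n in range(2, limit + 1))
print(expected, solution(limit))  # the two totals should agree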
88
1
'''simple docstring''' import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--user""", type=str, default="""ubuntu""") parser.add_argument("""--host""", type=str, default="""localhost""") parser.add_argument("""--key_path""", type=str, default=None) parser.add_argument("""--instance""", type=str, default="""V100:1""") parser.add_argument("""--provider""", type=str, default="""cheapest""") parser.add_argument("""--use_spot""", type=bool, default=False) parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""") UpperCamelCase_ , UpperCamelCase_ = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("""Cannot specify both BYO and on-demand cluster args""") UpperCamelCase_ = rh.cluster( name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path} ) else: UpperCamelCase_ = rh.cluster( name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) UpperCamelCase_ = args.example.rsplit("""/""", 1)[0] # Set up remote environment cluster.install_packages(["""pip:./"""]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
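A minimal sketch of the parse_known_args split that the launcher above relies on: flags it owns are consumed, while everything else is forwarded verbatim to the example script (the argument values here are placeholders):

import argparse
import shlex

parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")

argv = shlex.split(
    "--host 1.2.3.4 --example pytorch/text-generation/run_generation.py --model_type=gpt2 --length 20"
)
args, unknown = parser.parse_known_args(argv)
print(args.host, args.example)
print(" ".join(shlex.quote(a) for a in unknown))  # extra flags forwarded to the remote python command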
88
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = BioGptTokenizer lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase : List[str] =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Dict ='''lower newer''' lowercase : str ='''lower newer''' return input_text, output_text def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file ) lowercase : Any ='''lower''' lowercase : int =['''low''', '''er</w>'''] lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase : Optional[int] =tokens + ['''<unk>'''] lowercase : Any =[14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ ) lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ ) lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
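The toy vocabulary in the test above maps "low" and "er</w>" to ids 14 and 15 and unknown tokens to 20; a standalone sketch of that lookup:

vocab_tokens = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab = {token: index for index, token in enumerate(vocab_tokens)}

tokens = ["low", "er</w>", "<unk>"]   # "lower" after BPE, plus one unknown token
ids = [vocab[token] for token in tokens]
assert ids == [14, 15, 20]
print(ids)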
88
1
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'vision-encoder-decoder' lowerCamelCase_ = True def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F'''A configuraton of type {self.model_type} cannot be instantiated because ''' F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) lowercase : Optional[Any] =kwargs.pop('''encoder''' ) lowercase : List[Any] =encoder_config.pop('''model_type''' ) lowercase : List[str] =kwargs.pop('''decoder''' ) lowercase : Dict =decoder_config.pop('''model_type''' ) lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : str =True @classmethod def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowercase : int =True lowercase : Optional[Any] =True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =copy.deepcopy(self.__dict__ ) lowercase : Union[str, Any] =self.encoder.to_dict() lowercase : Union[str, Any] =self.decoder.to_dict() lowercase : int =self.__class__.model_type return output class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = version.parse('1.11' ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return 1E-4 @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =OrderedDict() lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ): '''simple docstring''' import torch lowercase : Optional[Any] =OrderedDict() lowercase : List[Any] =super().generate_dummy_inputs( UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ ) lowercase , lowercase : Optional[int] 
=dummy_input['''input_ids'''].shape lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size) lowercase : List[str] =dummy_input.pop('''input_ids''' ) lowercase : Tuple =dummy_input.pop('''attention_mask''' ) lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ ) return common_inputs class __SCREAMING_SNAKE_CASE ( lowercase__ ): @property def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ): '''simple docstring''' lowercase : List[Any] =encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
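A short sketch of composing encoder and decoder configurations the way the classmethod above is intended to be used, assuming the stock ViT/BERT config classes and that this module is exposed as VisionEncoderDecoderConfig:

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig(hidden_size=384, num_hidden_layers=6)
decoder_config = BertConfig(hidden_size=384, num_hidden_layers=4)

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.to_dict()["model_type"])  # "vision-encoder-decoder"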
88
'''simple docstring''' import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ): '''simple docstring''' lowercase : int =parent lowercase : List[str] =batch_size lowercase : str =seq_length lowercase : Optional[Any] =is_training lowercase : Union[str, Any] =use_attention_mask lowercase : Optional[Any] =use_token_type_ids lowercase : Tuple =use_labels lowercase : List[str] =vocab_size lowercase : List[str] =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Any =num_attention_heads lowercase : List[str] =intermediate_size lowercase : Optional[Any] =hidden_act lowercase : Dict =hidden_dropout_prob lowercase : List[Any] =attention_probs_dropout_prob lowercase : Optional[Any] =max_position_embeddings lowercase : Tuple =type_vocab_size lowercase : Optional[int] =type_sequence_label_size lowercase : Optional[Any] =initializer_range lowercase : Optional[int] =num_choices def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =None if self.use_attention_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Tuple =None if self.use_token_type_ids: lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : int =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : 
List[str] =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs lowercase : List[str] =True lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = True lowerCamelCase_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =FlaxRobertaModelTester(self ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ ) lowercase : List[Any] =model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase__ )
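A sketch of the kind of forward pass the slow test exercises, assuming Flax weights for the "roberta-base" checkpoint are available on the Hub and can be downloaded:

from transformers import FlaxRobertaModel, RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
model = FlaxRobertaModel.from_pretrained("roberta-base")

inputs = tokenizer("A quick smoke test.", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)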
88
1