Dataset schema (each record below has these five fields):

    column                    type      value range
    code                      string    lengths 86 to 54.5k
    code_codestyle            int64     0 to 371
    style_context             string    lengths 87 to 49.2k
    style_context_codestyle   int64     0 to 349
    label                     int64     0 to 1
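The rows that follow pair a `code` sample with a `style_context` sample, each tagged with a code-style id, plus a binary `label`. As a minimal sketch of how a dataset with this schema might be loaded and inspected with the Hugging Face `datasets` library (the repository id below is a placeholder, not the dataset's real name, which this dump does not give):

from datasets import load_dataset

# "org/code-style-pairs" is a hypothetical id standing in for the real repository.
ds = load_dataset("org/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:120])  # peek at the first record's flattened source text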
Row 1

code:

'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_SCREAMING_SNAKE_CASE : Any = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : List[Any] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer",
        "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        raise OptionalDependencyNotAvailable()

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM,
            RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer,
            RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert,
        )
else:
    import sys

    _SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

code_codestyle: 85

style_context:

'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def lowerCamelCase(*_SCREAMING_SNAKE_CASE: int):
    with open(_SCREAMING_SNAKE_CASE, 'r') as fh:
        fcntl.flock(_SCREAMING_SNAKE_CASE, fcntl.LOCK_EX)
        try:
            print(*_SCREAMING_SNAKE_CASE)
        finally:
            fcntl.flock(_SCREAMING_SNAKE_CASE, fcntl.LOCK_UN)


__lowercase : Dict = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
__lowercase : Tuple = torch.device('cuda', local_rank)
__lowercase : Optional[int] = socket.gethostname()
__lowercase : List[str] = f'''[{hostname}-{local_rank}]'''

try:
    # test distributed
    dist.init_process_group('nccl')
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    __lowercase : str = dist.get_rank()
    __lowercase : Union[str, Any] = dist.get_world_size()

    printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
    dist.barrier()

    if rank == 0:
        printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
    printflock(f'''{gpu} is broken''')
    raise

style_context_codestyle: 27

label: 0
"""simple docstring""" from math import pi, sqrt def a__ ( __lowercase ) -> float: if num <= 0: raise ValueError("math domain error" ) if num > 171.5: raise OverflowError("math range error" ) elif num - int(__lowercase ) not in (0, 0.5): raise NotImplementedError("num must be an integer or a half-integer" ) elif num == 0.5: return sqrt(__lowercase ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def a__ ( ) -> None: assert gamma(0.5 ) == sqrt(__lowercase ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() a_ = 1.0 while num: a_ = float(input("Gamma of: ")) print(f'''gamma({num}) = {gamma(num)}''') print("\nEnter 0 to exit...")
163
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { "configuration_xlm_roberta": [ "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig", "XLMRobertaOnnxConfig", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["XLMRobertaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["XLMRobertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaForCausalLM", "XLMRobertaForMaskedLM", "XLMRobertaForMultipleChoice", "XLMRobertaForQuestionAnswering", "XLMRobertaForSequenceClassification", "XLMRobertaForTokenClassification", "XLMRobertaModel", "XLMRobertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMRobertaForCausalLM", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForTokenClassification", "TFXLMRobertaModel", "TFXLMRobertaPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxXLMRobertaForMaskedLM", "FlaxXLMRobertaForCausalLM", "FlaxXLMRobertaForMultipleChoice", "FlaxXLMRobertaForQuestionAnswering", "FlaxXLMRobertaForSequenceClassification", "FlaxXLMRobertaForTokenClassification", "FlaxXLMRobertaModel", "FlaxXLMRobertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( 
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
163
1
"""simple docstring""" import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowerCAmelCase = """src/diffusers""" lowerCAmelCase = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowerCAmelCase = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowerCAmelCase = spec.loader.load_module() def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : List[Any] ) ->Union[str, Any]: return line.startswith(snake_case_ ) or len(snake_case_ ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , snake_case_ ) is not None def lowerCAmelCase_ ( snake_case_ : int ) ->Optional[int]: lowerCamelCase__ : Dict =object_name.split('.' ) lowerCamelCase__ : Tuple =0 # First let's find the module where our object lives. lowerCamelCase__ : Union[str, Any] =parts[i] while i < len(snake_case_ ) and not os.path.isfile(os.path.join(snake_case_ , f"""{module}.py""" ) ): i += 1 if i < len(snake_case_ ): lowerCamelCase__ : Dict =os.path.join(snake_case_ , parts[i] ) if i >= len(snake_case_ ): raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(snake_case_ , f"""{module}.py""" ) , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCamelCase__ : Optional[Any] =f.readlines() # Now let's find the class / func in the code! lowerCamelCase__ : List[Any] ='' lowerCamelCase__ : Any =0 for name in parts[i + 1 :]: while ( line_index < len(snake_case_ ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case_ ): raise ValueError(f""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). lowerCamelCase__ : Optional[Any] =line_index while line_index < len(snake_case_ ) and _should_continue(lines[line_index] , snake_case_ ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowerCamelCase__ : List[str] =lines[start_index:line_index] return "".join(snake_case_ ) lowerCAmelCase = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowerCAmelCase = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowerCAmelCase = re.compile(r"""<FILL\s+[^>]*>""") def lowerCAmelCase_ ( snake_case_ : Tuple ) ->Tuple: lowerCamelCase__ : Tuple =code.split('\n' ) lowerCamelCase__ : Union[str, Any] =0 while idx < len(snake_case_ ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case_ ): return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0] return "" def lowerCAmelCase_ ( snake_case_ : List[str] ) ->Tuple: lowerCamelCase__ : Union[str, Any] =len(get_indent(snake_case_ ) ) > 0 if has_indent: lowerCamelCase__ : int =f"""class Bla:\n{code}""" lowerCamelCase__ : Optional[int] =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=snake_case_ ) lowerCamelCase__ : Dict =black.format_str(snake_case_ , mode=snake_case_ ) lowerCamelCase__ , lowerCamelCase__ : Dict =style_docstrings_in_code(snake_case_ ) return result[len('class Bla:\n' ) :] if has_indent else result def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : List[Any]=False ) ->List[Any]: with open(snake_case_ , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCamelCase__ : Optional[int] =f.readlines() lowerCamelCase__ : Optional[Any] =[] lowerCamelCase__ : Optional[Any] =0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case_ ): lowerCamelCase__ : Optional[Any] =_re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =search.groups() lowerCamelCase__ : Union[str, Any] =find_code_in_diffusers(snake_case_ ) lowerCamelCase__ : Dict =get_indent(snake_case_ ) lowerCamelCase__ : List[Any] =line_index + 1 if indent == theoretical_indent else line_index + 2 lowerCamelCase__ : List[Any] =theoretical_indent lowerCamelCase__ : Union[str, Any] =start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. lowerCamelCase__ : Dict =True while line_index < len(snake_case_ ) and should_continue: line_index += 1 if line_index >= len(snake_case_ ): break lowerCamelCase__ : List[Any] =lines[line_index] lowerCamelCase__ : Optional[int] =_should_continue(snake_case_ , snake_case_ ) and re.search(f"""^{indent}# End copy""" , snake_case_ ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowerCamelCase__ : List[Any] =lines[start_index:line_index] lowerCamelCase__ : Tuple =''.join(snake_case_ ) # Remove any nested `Copied from` comments to avoid circular copies lowerCamelCase__ : str =[line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(snake_case_ ) is None] lowerCamelCase__ : List[str] ='\n'.join(snake_case_ ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case_ ) > 0: lowerCamelCase__ : Optional[Any] =replace_pattern.replace('with' , '' ).split(',' ) lowerCamelCase__ : str =[_re_replace_pattern.search(snake_case_ ) for p in patterns] for pattern in patterns: if pattern is None: continue lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =pattern.groups() lowerCamelCase__ : List[Any] =re.sub(snake_case_ , snake_case_ , snake_case_ ) if option.strip() == "all-casing": lowerCamelCase__ : Union[str, Any] =re.sub(obja.lower() , obja.lower() , snake_case_ ) lowerCamelCase__ : Dict =re.sub(obja.upper() , obja.upper() , snake_case_ ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line lowerCamelCase__ : Union[str, Any] =blackify(lines[start_index - 1] + theoretical_code ) lowerCamelCase__ : Dict =theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: lowerCamelCase__ : str =lines[:start_index] + [theoretical_code] + lines[line_index:] lowerCamelCase__ : Optional[int] =start_index + 1 if overwrite and len(snake_case_ ) > 0: # Warn the user a file has been modified. print(f"""Detected changes, rewriting {filename}.""" ) with open(snake_case_ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(snake_case_ ) return diffs def lowerCAmelCase_ ( snake_case_ : bool = False ) ->Any: lowerCamelCase__ : str =glob.glob(os.path.join(snake_case_ , '**/*.py' ) , recursive=snake_case_ ) lowerCamelCase__ : Optional[int] =[] for filename in all_files: lowerCamelCase__ : Any =is_copy_consistent(snake_case_ , snake_case_ ) diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(snake_case_ ) > 0: lowerCamelCase__ : Dict ='\n'.join(snake_case_ ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowerCAmelCase = parser.parse_args() check_copies(args.fix_and_overwrite)
126
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase = logging.get_logger(__name__) class A_ ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ["""audio_values""", """audio_mask"""] def __init__( self :List[str] , lowerCamelCase_ :List[str]=2_048 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=[16, 16] , lowerCamelCase_ :str=128 , lowerCamelCase_ :Union[str, Any]=44_100 , lowerCamelCase_ :Optional[Any]=86 , lowerCamelCase_ :Dict=2_048 , lowerCamelCase_ :Union[str, Any]=0.0 , **lowerCamelCase_ :Tuple , ): """simple docstring""" super().__init__( feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ , ) lowerCamelCase__ : List[str] =spectrogram_length lowerCamelCase__ : Dict =num_channels lowerCamelCase__ : List[Any] =patch_size lowerCamelCase__ : Union[str, Any] =feature_size // self.patch_size[1] lowerCamelCase__ : int =n_fft lowerCamelCase__ : List[str] =sampling_rate // hop_length_to_sampling_rate lowerCamelCase__ : str =sampling_rate lowerCamelCase__ : int =padding_value lowerCamelCase__ : Dict =mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCamelCase_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCamelCase_ , norm='slaney' , mel_scale='slaney' , ).T def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :np.array ): """simple docstring""" lowerCamelCase__ : List[Any] =spectrogram( lowerCamelCase_ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , ) lowerCamelCase__ : Any =log_spec[:, :-1] lowerCamelCase__ : Tuple =log_spec - 20.0 lowerCamelCase__ : List[str] =np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self :Optional[Any] , lowerCamelCase_ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = False , **lowerCamelCase_ :Tuple , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) lowerCamelCase__ : Dict =isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowerCamelCase__ : Union[str, Any] =is_batched_numpy or ( isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase__ : Optional[Any] =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ): lowerCamelCase__ : Optional[Any] =np.asarray(lowerCamelCase_ , dtype=np.floataa ) elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase__ : Union[str, Any] =raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase__ : List[str] =[np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase__ : Any =[ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , lowerCamelCase_ ): lowerCamelCase__ : Dict =[np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase__ : Optional[Any] =max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase__ : Any =[ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase__ : Union[str, Any] =np.array(lowerCamelCase_ ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase__ : Tuple =max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase__ : str =np.ones([len(lowerCamelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase__ : Dict =padded_audio_features * self.padding_value for i in range(len(lowerCamelCase_ ) ): lowerCamelCase__ : Union[str, Any] =audio_features[i] lowerCamelCase__ : Union[str, Any] =feature # return as BatchFeature if return_attention_mask: lowerCamelCase__ : int ={'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: lowerCamelCase__ : Tuple ={'audio_values': padded_audio_features} lowerCamelCase__ : Union[str, Any] =BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ ) return encoded_inputs
126
1
Row 4

code:

'''simple docstring'''


def __SCREAMING_SNAKE_CASE(_SCREAMING_SNAKE_CASE):
    _snake_case = set()
    # edges = list of graph's edges
    _snake_case = get_edges(_SCREAMING_SNAKE_CASE)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        _snake_case, _snake_case = edges.pop()
        chosen_vertices.add(_SCREAMING_SNAKE_CASE)
        chosen_vertices.add(_SCREAMING_SNAKE_CASE)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(_SCREAMING_SNAKE_CASE)
    return chosen_vertices


def __SCREAMING_SNAKE_CASE(_SCREAMING_SNAKE_CASE):
    _snake_case = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")

code_codestyle: 270

style_context:

'''simple docstring'''
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class _lowerCAmelCase(unittest.TestCase):
    '''simple docstring'''

    def lowercase(self) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def lowercase(self) -> Dict:
        _snake_case, _snake_case = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""", from_pt=UpperCAmelCase, dtype=jnp.bfloataa
        )
        _snake_case, _snake_case = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""", controlnet=UpperCAmelCase, from_pt=UpperCAmelCase, dtype=jnp.bfloataa
        )
        _snake_case = controlnet_params

        _snake_case = """bird"""
        _snake_case = jax.device_count()
        _snake_case = pipe.prepare_text_inputs([prompts] * num_samples)

        _snake_case = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"""
        )
        _snake_case = pipe.prepare_image_inputs([canny_image] * num_samples)

        _snake_case = jax.random.PRNGKey(0)
        _snake_case = jax.random.split(UpperCAmelCase, jax.device_count())

        _snake_case = replicate(UpperCAmelCase)
        _snake_case = shard(UpperCAmelCase)
        _snake_case = shard(UpperCAmelCase)

        _snake_case = pipe(
            prompt_ids=UpperCAmelCase,
            image=UpperCAmelCase,
            params=UpperCAmelCase,
            prng_seed=UpperCAmelCase,
            num_inference_steps=50,
            jit=UpperCAmelCase,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        _snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        _snake_case = images[0, 253:256, 253:256, -1]
        _snake_case = jnp.asarray(jax.device_get(image_slice.flatten()))
        _snake_case = jnp.array(
            [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078]
        )
        print(f"""output_slice: {output_slice}""")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def lowercase(self) -> Optional[int]:
        _snake_case, _snake_case = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""", from_pt=UpperCAmelCase, dtype=jnp.bfloataa
        )
        _snake_case, _snake_case = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""", controlnet=UpperCAmelCase, from_pt=UpperCAmelCase, dtype=jnp.bfloataa
        )
        _snake_case = controlnet_params

        _snake_case = """Chef in the kitchen"""
        _snake_case = jax.device_count()
        _snake_case = pipe.prepare_text_inputs([prompts] * num_samples)

        _snake_case = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"""
        )
        _snake_case = pipe.prepare_image_inputs([pose_image] * num_samples)

        _snake_case = jax.random.PRNGKey(0)
        _snake_case = jax.random.split(UpperCAmelCase, jax.device_count())

        _snake_case = replicate(UpperCAmelCase)
        _snake_case = shard(UpperCAmelCase)
        _snake_case = shard(UpperCAmelCase)

        _snake_case = pipe(
            prompt_ids=UpperCAmelCase,
            image=UpperCAmelCase,
            params=UpperCAmelCase,
            prng_seed=UpperCAmelCase,
            num_inference_steps=50,
            jit=UpperCAmelCase,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        _snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        _snake_case = images[0, 253:256, 253:256, -1]
        _snake_case = jnp.asarray(jax.device_get(image_slice.flatten()))
        _snake_case = jnp.array(
            [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]]
        )
        print(f"""output_slice: {output_slice}""")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

style_context_codestyle: 270

label: 1
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class a ( _a ): UpperCamelCase : Any = """blenderbot-small""" UpperCamelCase : List[Any] = ["""past_key_values"""] UpperCamelCase : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Tuple , lowerCAmelCase : Any=5_0265 , lowerCAmelCase : Any=512 , lowerCAmelCase : Dict=8 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : Any=16 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : Optional[int]=16 , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : int=True , lowerCAmelCase : str="gelu" , lowerCAmelCase : Union[str, Any]=512 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Any=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Optional[int]=0.0_2 , lowerCAmelCase : List[str]=1 , lowerCAmelCase : str=False , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=2 , lowerCAmelCase : Tuple=2 , **lowerCAmelCase : str , ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict =vocab_size SCREAMING_SNAKE_CASE_: Dict =max_position_embeddings SCREAMING_SNAKE_CASE_: Optional[int] =d_model SCREAMING_SNAKE_CASE_: str =encoder_ffn_dim SCREAMING_SNAKE_CASE_: str =encoder_layers SCREAMING_SNAKE_CASE_: Any =encoder_attention_heads SCREAMING_SNAKE_CASE_: Any =decoder_ffn_dim SCREAMING_SNAKE_CASE_: List[str] =decoder_layers SCREAMING_SNAKE_CASE_: str =decoder_attention_heads SCREAMING_SNAKE_CASE_: Dict =dropout SCREAMING_SNAKE_CASE_: str =attention_dropout SCREAMING_SNAKE_CASE_: Union[str, Any] =activation_dropout SCREAMING_SNAKE_CASE_: Tuple =activation_function SCREAMING_SNAKE_CASE_: Optional[int] =init_std SCREAMING_SNAKE_CASE_: Any =encoder_layerdrop SCREAMING_SNAKE_CASE_: Optional[int] =decoder_layerdrop SCREAMING_SNAKE_CASE_: Optional[Any] =use_cache SCREAMING_SNAKE_CASE_: Dict =encoder_layers SCREAMING_SNAKE_CASE_: List[str] =scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , ) class a ( _a ): @property def lowerCamelCase__ ( self : Any ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: SCREAMING_SNAKE_CASE_: Tuple ={0: 'batch'} SCREAMING_SNAKE_CASE_: Optional[int] ={0: 'batch', 1: 'past_decoder_sequence + sequence'} else: SCREAMING_SNAKE_CASE_: str ={0: 'batch', 1: 'decoder_sequence'} 
SCREAMING_SNAKE_CASE_: int ={0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. SCREAMING_SNAKE_CASE_: Tuple =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: SCREAMING_SNAKE_CASE_: List[str] =self.num_layers for i in range(lowerCAmelCase ): SCREAMING_SNAKE_CASE_: int ={0: 'batch', 2: 'past_sequence + sequence'} SCREAMING_SNAKE_CASE_: str ={0: 'batch', 2: 'past_sequence + sequence'} else: SCREAMING_SNAKE_CASE_: str =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def lowerCamelCase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_: Any =super().outputs else: SCREAMING_SNAKE_CASE_: Tuple =super(lowerCAmelCase , self ).outputs if self.use_past: SCREAMING_SNAKE_CASE_: int =self.num_layers for i in range(lowerCAmelCase ): SCREAMING_SNAKE_CASE_: Any ={0: 'batch', 2: 'past_sequence + sequence'} SCREAMING_SNAKE_CASE_: Tuple ={0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def lowerCamelCase__ ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : List[Any] = -1 , lowerCAmelCase : Tuple = -1 , lowerCAmelCase : str = False , lowerCAmelCase : int = None , ) -> Mapping[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[int] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Generate decoder inputs SCREAMING_SNAKE_CASE_: str =seq_length if not self.use_past else 1 SCREAMING_SNAKE_CASE_: List[Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] ={f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} SCREAMING_SNAKE_CASE_: Any =dict(**lowerCAmelCase , **lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch SCREAMING_SNAKE_CASE_: int =common_inputs['input_ids'].shape SCREAMING_SNAKE_CASE_: Dict =common_inputs['decoder_input_ids'].shape[1] SCREAMING_SNAKE_CASE_: int =self.num_attention_heads SCREAMING_SNAKE_CASE_: Tuple =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE_: Tuple =decoder_seq_length + 3 SCREAMING_SNAKE_CASE_: str =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) SCREAMING_SNAKE_CASE_: int =torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 ) SCREAMING_SNAKE_CASE_: Optional[int] =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered SCREAMING_SNAKE_CASE_: Dict =self.num_layers SCREAMING_SNAKE_CASE_: List[str] =min(lowerCAmelCase , lowerCAmelCase ) 
SCREAMING_SNAKE_CASE_: str =max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers SCREAMING_SNAKE_CASE_: Optional[int] ='encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), ) ) # TODO: test this. SCREAMING_SNAKE_CASE_: List[Any] =encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowerCAmelCase , lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) ) return common_inputs def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] = -1 , lowerCAmelCase : Any = -1 , lowerCAmelCase : List[Any] = False , lowerCAmelCase : List[str] = None , ) -> Mapping[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch SCREAMING_SNAKE_CASE_: Optional[Any] =common_inputs['input_ids'].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE_: Any =seqlen + 2 SCREAMING_SNAKE_CASE_: Dict =self.num_layers SCREAMING_SNAKE_CASE_: Optional[Any] =self.num_attention_heads SCREAMING_SNAKE_CASE_: Union[str, Any] =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE_: Tuple =common_inputs['attention_mask'].dtype SCREAMING_SNAKE_CASE_: Optional[int] =torch.cat( [common_inputs["""attention_mask"""], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 ) SCREAMING_SNAKE_CASE_: Any =[ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase ) ] return common_inputs def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : List[Any] = False , lowerCAmelCase : Tuple = None , ) -> Mapping[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Tuple =compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE_: List[str] =tokenizer.num_special_tokens_to_add(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: int =compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence SCREAMING_SNAKE_CASE_: Optional[int] =[' '.join([tokenizer.unk_token] ) * seq_length] * batch_size SCREAMING_SNAKE_CASE_: List[str] =dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) ) return common_inputs def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any , lowerCAmelCase : List[str] = -1 , lowerCAmelCase : Optional[Any] = -1 , lowerCAmelCase : int = False , lowerCAmelCase : Tuple = None , ) -> Mapping[str, Any]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_: Optional[Any] =self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase , 
batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) elif self.task == "causal-lm": SCREAMING_SNAKE_CASE_: Any =self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) else: SCREAMING_SNAKE_CASE_: Optional[int] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) return common_inputs def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : int ) -> int: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_: List[Any] =super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: SCREAMING_SNAKE_CASE_: Tuple =super(lowerCAmelCase , self )._flatten_past_key_values_( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
173
from __future__ import annotations def UpperCamelCase_( lowerCamelCase_ ) -> bool: if len(lowerCamelCase_ ) < 2: raise ValueError('Monogons and Digons are not polygons in the Euclidean space' ) if any(i <= 0 for i in nums ): raise ValueError('All values must be greater than 0' ) _lowercase : Tuple = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
21
0
"""simple docstring""" import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging _lowerCAmelCase :List[Any] = logging.get_logger(__name__) def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ): _UpperCAmelCase : Optional[Any] = nn.functional.normalize(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = nn.functional.normalize(UpperCamelCase__ ) return torch.mm(UpperCamelCase__ , normalized_text_embeds.t() ) class _UpperCAmelCase ( a ): '''simple docstring''' a__ =CLIPConfig a__ =['''CLIPEncoderLayer'''] def __init__( self , A ) -> Union[str, Any]: super().__init__(A ) _UpperCAmelCase : Any = CLIPVisionModel(config.vision_config ) _UpperCAmelCase : Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=A ) _UpperCAmelCase : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=A ) _UpperCAmelCase : int = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=A ) _UpperCAmelCase : str = nn.Parameter(torch.ones(1_7 ) , requires_grad=A ) _UpperCAmelCase : Any = nn.Parameter(torch.ones(3 ) , requires_grad=A ) @torch.no_grad() def __lowerCAmelCase ( self , A , A ) -> List[Any]: _UpperCAmelCase : str = self.vision_model(A )[1] # pooled_output _UpperCAmelCase : Any = self.visual_projection(A ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _UpperCAmelCase : str = cosine_distance(A , self.special_care_embeds ).cpu().float().numpy() _UpperCAmelCase : List[str] = cosine_distance(A , self.concept_embeds ).cpu().float().numpy() _UpperCAmelCase : Any = [] _UpperCAmelCase : Dict = image_embeds.shape[0] for i in range(A ): _UpperCAmelCase : Union[str, Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images _UpperCAmelCase : Tuple = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): _UpperCAmelCase : Optional[Any] = special_cos_dist[i][concept_idx] _UpperCAmelCase : str = self.special_care_embeds_weights[concept_idx].item() _UpperCAmelCase : Optional[int] = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) _UpperCAmelCase : int = 0.01 for concept_idx in range(len(cos_dist[0] ) ): _UpperCAmelCase : Any = cos_dist[i][concept_idx] _UpperCAmelCase : Optional[int] = self.concept_embeds_weights[concept_idx].item() _UpperCAmelCase : List[Any] = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(A ) result.append(A ) _UpperCAmelCase : List[str] = [len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def __lowerCAmelCase ( self , A , A ) -> List[Any]: _UpperCAmelCase : Dict = self.vision_model(A )[1] # pooled_output _UpperCAmelCase : Any = self.visual_projection(A ) _UpperCAmelCase : Optional[Any] = cosine_distance(A , self.special_care_embeds ) _UpperCAmelCase : List[Any] = cosine_distance(A , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images _UpperCAmelCase : int = 0.0 _UpperCAmelCase : Any = special_cos_dist - 
self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) _UpperCAmelCase : List[Any] = torch.any(special_scores > 0 , dim=1 ) _UpperCAmelCase : Tuple = special_care * 0.01 _UpperCAmelCase : str = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) _UpperCAmelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) _UpperCAmelCase : str = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
359
"""simple docstring""" from __future__ import annotations def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): _UpperCAmelCase , _UpperCAmelCase : int = array[indexa], array[indexa] def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): if length > 1: _UpperCAmelCase : str = int(length / 2 ) for i in range(UpperCamelCase__ , low + middle ): comp_and_swap(UpperCamelCase__ , UpperCamelCase__ , i + middle , UpperCamelCase__ ) bitonic_merge(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) bitonic_merge(UpperCamelCase__ , low + middle , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): if length > 1: _UpperCAmelCase : str = int(length / 2 ) bitonic_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 1 ) bitonic_sort(UpperCamelCase__ , low + middle , UpperCamelCase__ , 0 ) bitonic_merge(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": _lowerCAmelCase :Any = input('Enter numbers separated by a comma:\n').strip() _lowerCAmelCase :Tuple = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
68
0
Row 7

code:

'''simple docstring'''
from collections import defaultdict


class A__:
    def __init__(self: Optional[int], SCREAMING_SNAKE_CASE: int, SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
        '''simple docstring'''
        _a : List[Any] = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        _a : Union[str, Any] = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(UpperCamelCase_))
        ]

        _a : str = defaultdict(UpperCamelCase_)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        _a : Optional[int] = (1 << len(UpperCamelCase_)) - 1

    def __UpperCAmelCase(self: Optional[int], SCREAMING_SNAKE_CASE: List[str], SCREAMING_SNAKE_CASE: int) -> Optional[int]:
        '''simple docstring'''
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't this task in the arrangement
        _a : str = self.count_ways_until(UpperCamelCase_, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        _a : Optional[int] = total_ways_util

        return self.dp[mask][task_no]

    def __UpperCAmelCase(self: str, SCREAMING_SNAKE_CASE: int) -> List[Any]:
        '''simple docstring'''
        for i in range(len(UpperCamelCase_)):
            for j in task_performed[i]:
                self.task[j].append(UpperCamelCase_)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    A__: Optional[int] = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    A__: Optional[int] = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )

code_codestyle: 276

style_context:

'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    __snake_case = True
except (ImportError, ModuleNotFoundError):
    __snake_case = False

if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)


def a(__a) -> str:
    '''simple docstring'''
    re.sub('''<n>''', '''''', __a)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a))

style_context_codestyle: 97

label: 0
Row 8

code:

import argparse
from collections import defaultdict


def lowerCamelCase(SCREAMING_SNAKE_CASE: Tuple, SCREAMING_SNAKE_CASE: Union[str, Any], SCREAMING_SNAKE_CASE: List[str], SCREAMING_SNAKE_CASE: Tuple, SCREAMING_SNAKE_CASE: Optional[Any]):
    '''simple docstring'''
    __UpperCamelCase : Union[str, Any] = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1

    with open(_UpperCamelCase, '''r''') as f:
        __UpperCamelCase : Union[str, Any] = f.readlines()

    __UpperCamelCase : int = f"""class {class_name}("""
    __UpperCamelCase : List[str] = f"""{4 * ' '}def {test_name}("""
    __UpperCamelCase : str = f"""{8 * ' '}{correct_line.split()[0]}"""
    __UpperCamelCase : Union[str, Any] = f"""{16 * ' '}{correct_line.split()[0]}"""
    __UpperCamelCase : Dict = False
    __UpperCamelCase : str = False
    __UpperCamelCase : Optional[Any] = False
    __UpperCamelCase : List[Any] = False
    __UpperCamelCase : int = 0
    __UpperCamelCase : Optional[int] = 0
    __UpperCamelCase : str = []
    for line in lines:
        if line.startswith(_UpperCamelCase):
            __UpperCamelCase : Any = True
        elif in_class and line.startswith(_UpperCamelCase):
            __UpperCamelCase : Optional[int] = True
        elif in_class and in_func and (line.startswith(_UpperCamelCase) or line.startswith(_UpperCamelCase)):
            __UpperCamelCase : Optional[int] = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                __UpperCamelCase : str = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                __UpperCamelCase : Union[str, Any] = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * ' '}{correct_line}""")
            __UpperCamelCase : Any = False
        else:
            new_lines.append(_UpperCamelCase)

    with open(_UpperCamelCase, '''w''') as f:
        for line in new_lines:
            f.write(_UpperCamelCase)


def lowerCamelCase(SCREAMING_SNAKE_CASE: List[Any], SCREAMING_SNAKE_CASE: Any = None):
    '''simple docstring'''
    if fail is not None:
        with open(_UpperCamelCase, '''r''') as f:
            __UpperCamelCase : Union[str, Any] = {l.strip() for l in f.readlines()}
    else:
        __UpperCamelCase : Optional[Any] = None

    with open(_UpperCamelCase, '''r''') as f:
        __UpperCamelCase : Optional[Any] = f.readlines()

    __UpperCamelCase : List[Any] = defaultdict(_UpperCamelCase)
    for line in correct_lines:
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str = line.split(''';''')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase)


if __name__ == "__main__":
    __lowercase = argparse.ArgumentParser()
    parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
    parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    __lowercase = parser.parse_args()

    main(args.correct_filename, args.fail_filename)

code_codestyle: 359

style_context:

import os

import pytest

from transformers.dynamic_module_utils import get_imports


__lowercase = '''
import os
'''

__lowercase = '''
def foo():
    import os
    return False
'''

__lowercase = '''
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
'''

__lowercase = '''
import os

try:
    import bar
except ImportError:
    raise ValueError()
'''

__lowercase = '''
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
'''

__lowercase = '''
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
'''

__lowercase = '''
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
'''

__lowercase = '''
import os

try:
    import bar
except:
    raise ValueError()
'''

__lowercase = '''
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
'''

__lowercase = '''
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
'''

__lowercase = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize('''case''', SCREAMING_SNAKE_CASE)
def lowerCamelCase(SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE):
    '''simple docstring'''
    __UpperCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE, '''test_file.py''')
    with open(SCREAMING_SNAKE_CASE, '''w''') as _tmp_file:
        _tmp_file.write(SCREAMING_SNAKE_CASE)

    __UpperCamelCase : Dict = get_imports(SCREAMING_SNAKE_CASE)
    assert parsed_imports == ["os"]

style_context_codestyle: 105

label: 0
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _a = 16 _a = 32 def __a ( __lowerCamelCase, __lowerCamelCase = 16, __lowerCamelCase = "bert-base-cased" ): UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase ) UpperCAmelCase_ : List[Any] = load_dataset("glue", "mrpc" ) def tokenize_function(__lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ : str = tokenizer(examples["sentence1"], examples["sentence2"], truncation=__lowerCAmelCase, max_length=__lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ : Union[str, Any] = datasets.map( __lowerCAmelCase, batched=__lowerCAmelCase, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=__lowerCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ : List[Any] = tokenized_datasets.rename_column("label", "labels" ) def collate_fn(__lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__lowerCAmelCase, padding="max_length", max_length=128, return_tensors="pt" ) return tokenizer.pad(__lowerCAmelCase, padding="longest", return_tensors="pt" ) # Instantiate dataloaders. UpperCAmelCase_ : str = DataLoader( tokenized_datasets["train"], shuffle=__lowerCAmelCase, collate_fn=__lowerCAmelCase, batch_size=__lowerCAmelCase ) UpperCAmelCase_ : Optional[Any] = DataLoader( tokenized_datasets["validation"], shuffle=__lowerCAmelCase, collate_fn=__lowerCAmelCase, batch_size=__lowerCAmelCase ) return train_dataloader, eval_dataloader def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): model.eval() UpperCAmelCase_ : Tuple = 0 for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(**__lowerCAmelCase ) UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ : Optional[Any] = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__lowerCAmelCase ) - 1: UpperCAmelCase_ : str = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__lowerCAmelCase, references=__lowerCAmelCase, ) UpperCAmelCase_ : Dict = metric.compute() return eval_metric["accuracy"] def __a ( __lowerCamelCase, __lowerCamelCase ): # Initialize accelerator UpperCAmelCase_ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ : List[str] = config["""lr"""] UpperCAmelCase_ : Any = int(config["num_epochs"] ) UpperCAmelCase_ : Optional[Any] = int(config["seed"] ) UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] ) UpperCAmelCase_ : List[str] = args.model_name_or_path set_seed(__lowerCAmelCase ) UpperCAmelCase_ : str = get_dataloaders(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ : Tuple = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase, return_dict=__lowerCAmelCase ) # Instantiate optimizer UpperCAmelCase_ : Optional[int] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ : Optional[Any] = optimizer_cls(params=model.parameters(), lr=__lowerCAmelCase ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ : Any = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : str = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ : Dict = get_linear_schedule_with_warmup( optimizer=__lowerCAmelCase, num_warmup_steps=0, num_training_steps=__lowerCAmelCase, ) else: UpperCAmelCase_ : List[Any] = DummyScheduler(__lowerCAmelCase, total_num_steps=__lowerCAmelCase, warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase_ : List[Any] = accelerator.prepare( __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ : str = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_ : Tuple = 0 UpperCAmelCase_ : Union[str, Any] = evaluate.load("glue", "mrpc" ) UpperCAmelCase_ : Union[str, Any] = num_epochs if args.partial_train_epoch is not None: UpperCAmelCase_ : int = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase_ : int = args.resume_from_checkpoint.split("epoch_" )[1] UpperCAmelCase_ : Tuple = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCAmelCase_ : Optional[Any] = int(__lowerCAmelCase ) + 1 UpperCAmelCase_ : Tuple = evaluation_loop(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase ) accelerator.print("resumed checkpoint performance:", __lowerCAmelCase ) accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir, f"""state_{starting_epoch-1}.json""" ), "r" ) as f: UpperCAmelCase_ : Dict = json.load(__lowerCAmelCase ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCAmelCase_ : List[str] = {} for epoch in range(__lowerCAmelCase, __lowerCAmelCase ): model.train() for step, batch in enumerate(__lowerCAmelCase ): UpperCAmelCase_ : Dict = model(**__lowerCAmelCase ) UpperCAmelCase_ : Dict = outputs.loss UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(__lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCAmelCase_ : Union[str, Any] = f"""epoch_{epoch}""" UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, __lowerCAmelCase ) accelerator.save_state(__lowerCAmelCase ) UpperCAmelCase_ : int = evaluation_loop(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase ) UpperCAmelCase_ : Dict = accuracy UpperCAmelCase_ : Tuple = lr_scheduler.get_lr()[0] UpperCAmelCase_ : int = optimizer.param_groups[0]["""lr"""] UpperCAmelCase_ : Any = epoch UpperCAmelCase_ : List[str] = overall_step accelerator.print(f"""epoch {epoch}:""", __lowerCAmelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, f"""state_{epoch}.json""" ), "w" ) as f: json.dump(__lowerCAmelCase, __lowerCAmelCase ) def __a ( ): UpperCAmelCase_ : Dict = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." 
) parser.add_argument( "--model_name_or_path", type=__lowerCAmelCase, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=__lowerCAmelCase, ) parser.add_argument( "--output_dir", type=__lowerCAmelCase, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=__lowerCAmelCase, default=__lowerCAmelCase, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--partial_train_epoch", type=__lowerCAmelCase, default=__lowerCAmelCase, help="If passed, the training will stop after this number of epochs.", ) parser.add_argument( "--num_epochs", type=__lowerCAmelCase, default=2, help="Number of train epochs.", ) UpperCAmelCase_ : Any = parser.parse_args() UpperCAmelCase_ : Union[str, Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(__lowerCAmelCase, __lowerCAmelCase ) if __name__ == "__main__": main()
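A minimal driver sketch for the checkpointing script above (normally launched with accelerate launch). The module and function names below stand in for the script's obfuscated identifiers and are assumptions, not real names from the file:

import argparse

from checkpointing import training_function  # hypothetical module/function names

args = argparse.Namespace(
    model_name_or_path="bert-base-cased",
    output_dir=".",
    resume_from_checkpoint=None,  # e.g. "./epoch_0" to exercise checkpoint loading
    partial_train_epoch=None,
    num_epochs=2,
)
config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(config, args)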
61
"""simple docstring""" import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class __a (unittest.TestCase): '''simple docstring''' def __init__( self , _a , _a = True , _a = None , _a = 32 , _a = True , _a = 1 / 255 , _a = True , _a = True , _a = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _a = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _a = True , _a=7 , _a=30 , _a=400 , _a=3 , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = parent SCREAMING_SNAKE_CASE__ : Tuple = do_resize SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 288} SCREAMING_SNAKE_CASE__ : List[str] = size_divisor SCREAMING_SNAKE_CASE__ : Tuple = do_rescale SCREAMING_SNAKE_CASE__ : List[str] = rescale_factor SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_normalize SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_mean SCREAMING_SNAKE_CASE__ : List[str] = image_std SCREAMING_SNAKE_CASE__ : List[str] = do_pad SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE__ : int = num_channels SCREAMING_SNAKE_CASE__ : Dict = min_resolution SCREAMING_SNAKE_CASE__ : str = max_resolution def _a ( self ) -> List[str]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _a ( self , _a , _a=False ) -> int: """simple docstring""" if not batched: SCREAMING_SNAKE_CASE__ : List[Any] = self.size["""shortest_edge"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_inputs[0] if isinstance(_a , Image.Image ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = image.size else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = image.shape[1], image.shape[2] SCREAMING_SNAKE_CASE__ : Tuple = size / min(_a , _a ) if h < w: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = size, scale * w else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((1_333 / 800) * size ) if max(_a , _a ) > max_size: SCREAMING_SNAKE_CASE__ : List[str] = max_size / max(_a , _a ) SCREAMING_SNAKE_CASE__ : Any = newh * scale SCREAMING_SNAKE_CASE__ : Any = neww * scale SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = int(newh + 0.5 ), int(neww + 0.5 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: SCREAMING_SNAKE_CASE__ : Dict = [] for image in image_inputs: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE__ : Any = max(_a , key=lambda _a : item[0] )[0] SCREAMING_SNAKE_CASE__ : Any = max(_a , key=lambda _a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a (UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :Optional[int] = 
BridgeTowerImageProcessor if is_vision_available() else None def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = BridgeTowerImageProcessingTester(self ) @property def _a ( self ) -> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """size_divisor""" ) ) def _a ( self ) -> List[str]: """simple docstring""" pass def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ : Tuple = image_processing(_a , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(image_inputs[0] , 
return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ : Dict = image_processing(_a , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
132
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _A = logging.get_logger(__name__) class lowercase_ ( __SCREAMING_SNAKE_CASE ): A__ : Union[str, Any] = ["""pixel_values"""] def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_5_5 , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" super().__init__(**__UpperCamelCase ) UpperCamelCase_ = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} UpperCamelCase_ = get_size_dict(__UpperCamelCase ) UpperCamelCase_ = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4} UpperCamelCase_ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name="""crop_size""" ) UpperCamelCase_ = do_resize UpperCamelCase_ = do_rescale UpperCamelCase_ = do_normalize UpperCamelCase_ = do_center_crop UpperCamelCase_ = crop_size UpperCamelCase_ = size UpperCamelCase_ = resample UpperCamelCase_ = rescale_factor UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" UpperCamelCase_ = get_size_dict(__UpperCamelCase ) if "shortest_edge" in size: UpperCamelCase_ = get_resize_output_image_size(__UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCamelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: UpperCamelCase_ = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" UpperCamelCase_ = get_size_dict(__UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase ): """simple docstring""" return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ): """simple docstring""" UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size UpperCamelCase_ = get_size_dict(__UpperCamelCase , param_name="""crop_size""" , default_to_square=__UpperCamelCase ) UpperCamelCase_ = resample if resample is not None else self.resample UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean UpperCamelCase_ = image_std if image_std is not None else self.image_std UpperCamelCase_ = size if size is not None else self.size UpperCamelCase_ = get_size_dict(__UpperCamelCase ) if not is_batched(__UpperCamelCase ): UpperCamelCase_ = [images] if not valid_images(__UpperCamelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. UpperCamelCase_ = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: UpperCamelCase_ = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_center_crop: UpperCamelCase_ = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images] if do_rescale: UpperCamelCase_ = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images] if do_normalize: UpperCamelCase_ = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images] UpperCamelCase_ = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] UpperCamelCase_ = {"""pixel_values""": images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
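A usage sketch for the processor above, assuming the class and its methods keep their canonical transformers names (the identifiers in this file are obfuscated); the random image is a placeholder:

import numpy as np
from PIL import Image

processor = ImageProcessor()  # stand-in name; defaults: resize + center-crop to 224, rescale 1/255, normalize
image = Image.fromarray(np.random.randint(0, 256, (256, 320, 3), dtype=np.uint8))
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)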
261
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _A = logging.get_logger(__name__) class lowercase_ ( __SCREAMING_SNAKE_CASE ): A__ : Union[str, Any] = ["""pixel_values"""] def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_5_5 , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" super().__init__(**__UpperCamelCase ) UpperCamelCase_ = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} UpperCamelCase_ = get_size_dict(__UpperCamelCase ) UpperCamelCase_ = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4} UpperCamelCase_ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name="""crop_size""" ) UpperCamelCase_ = do_resize UpperCamelCase_ = do_rescale UpperCamelCase_ = do_normalize UpperCamelCase_ = do_center_crop UpperCamelCase_ = crop_size UpperCamelCase_ = size UpperCamelCase_ = resample UpperCamelCase_ = rescale_factor UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" UpperCamelCase_ = get_size_dict(__UpperCamelCase ) if "shortest_edge" in size: UpperCamelCase_ = get_resize_output_image_size(__UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCamelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: UpperCamelCase_ = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" UpperCamelCase_ = get_size_dict(__UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase ): """simple docstring""" return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ): """simple docstring""" UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size UpperCamelCase_ = get_size_dict(__UpperCamelCase , param_name="""crop_size""" , default_to_square=__UpperCamelCase ) UpperCamelCase_ = resample if resample is not None else self.resample UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean UpperCamelCase_ = image_std if image_std is not None else self.image_std UpperCamelCase_ = size if size is not None else self.size UpperCamelCase_ = get_size_dict(__UpperCamelCase ) if not is_batched(__UpperCamelCase ): UpperCamelCase_ = [images] if not valid_images(__UpperCamelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. UpperCamelCase_ = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: UpperCamelCase_ = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_center_crop: UpperCamelCase_ = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images] if do_rescale: UpperCamelCase_ = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images] if do_normalize: UpperCamelCase_ = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images] UpperCamelCase_ = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] UpperCamelCase_ = {"""pixel_values""": images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
261
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase_ = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } UpperCamelCase_ = { 'gpt-neox-20b': 2048, } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Optional[int] = VOCAB_FILES_NAMES a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP a_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : str = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) a_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space" , __UpperCAmelCase) != add_prefix_space: a_ = getattr(__UpperCAmelCase , pre_tok_state.pop("type")) a_ = add_prefix_space a_ = pre_tok_class(**__UpperCAmelCase) a_ = add_prefix_space def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Tuple[str]: a_ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase) return tuple(__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[int]: a_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) + [self.eos_token_id]) if len(__UpperCAmelCase) > self.model_max_length: a_ = input_ids[-self.model_max_length :] return input_ids
243
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class snake_case : def __init__( self , __UpperCAmelCase = "cpu" , __UpperCAmelCase = "openai/clip-vit-large-patch14") ->None: a_ = device a_ = CLIPTokenizerFast.from_pretrained(__UpperCAmelCase) a_ = [0.48_145_466, 0.4_578_275, 0.40_821_073] a_ = [0.26_862_954, 0.26_130_258, 0.27_577_711] a_ = torchvision.transforms.Normalize(self.image_mean , self.image_std) a_ = torchvision.transforms.Resize(2_24) a_ = torchvision.transforms.CenterCrop(2_24) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[Any]: a_ = self.resize(__UpperCAmelCase) a_ = self.center_crop(__UpperCAmelCase) a_ = self.normalize(__UpperCAmelCase) return images def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase) ->Union[str, Any]: a_ = self.tokenizer(text=__UpperCAmelCase , **__UpperCAmelCase) a_ = self.preprocess_img(__UpperCAmelCase) a_ = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class snake_case ( nn.Module ): def __init__( self , __UpperCAmelCase=10 , __UpperCAmelCase=0.01 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="image" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ) ->None: super().__init__() a_ = None a_ = device if device else get_device() if vqgan: a_ = vqgan else: a_ = load_vqgan(self.device , conf_path=__UpperCAmelCase , ckpt_path=__UpperCAmelCase) self.vqgan.eval() if clip: a_ = clip else: a_ = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") self.clip.to(self.device) a_ = ProcessorGradientFlow(device=self.device) a_ = iterations a_ = lr a_ = log a_ = make_grid a_ = return_val a_ = quantize a_ = self.vqgan.decoder.z_shape def UpperCAmelCase__ ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=5 , __UpperCAmelCase=True) ->Any: a_ = [] if output_path is None: a_ = "./animation.gif" if input_path is None: a_ = self.save_path a_ = sorted(glob(input_path + "/*")) if not len(__UpperCAmelCase): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)") if len(__UpperCAmelCase) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)") a_ = total_duration / len(__UpperCAmelCase) a_ = [frame_duration] * len(__UpperCAmelCase) if extend_frames: a_ = 1.5 a_ = 3 for file_name in paths: if file_name.endswith(".png"): images.append(imageio.imread(__UpperCAmelCase)) imageio.mimsave(__UpperCAmelCase , __UpperCAmelCase , duration=__UpperCAmelCase) print(F'''gif saved to {output_path}''') def UpperCAmelCase__ ( self , __UpperCAmelCase=None , __UpperCAmelCase=None) ->List[Any]: if not (path or img): raise ValueError("Input either path or tensor") if img is not None: raise NotImplementedError a_ = preprocess(Image.open(__UpperCAmelCase) , target_image_size=2_56).to(self.device) a_ = preprocess_vqgan(__UpperCAmelCase) a_ , *a_ = self.vqgan.encode(__UpperCAmelCase) return z def UpperCAmelCase__ ( self , 
__UpperCAmelCase) ->Tuple: a_ = self.latent.detach().requires_grad_() a_ = base_latent + transform_vector if self.quantize: a_ , *a_ = self.vqgan.quantize(__UpperCAmelCase) else: a_ = trans_latent return self.vqgan.decode(__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->str: a_ = self.clip_preprocessor(text=__UpperCAmelCase , images=__UpperCAmelCase , return_tensors="pt" , padding=__UpperCAmelCase) a_ = self.clip(**__UpperCAmelCase) a_ = clip_outputs.logits_per_image if weights is not None: a_ = similarity_logits * weights return similarity_logits.sum() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]: a_ = self._get_clip_similarity(pos_prompts["prompts"] , __UpperCAmelCase , weights=(1 / pos_prompts["weights"])) if neg_prompts: a_ = self._get_clip_similarity(neg_prompts["prompts"] , __UpperCAmelCase , weights=neg_prompts["weights"]) else: a_ = torch.tensor([1] , device=self.device) a_ = -torch.log(__UpperCAmelCase) + torch.log(__UpperCAmelCase) return loss def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int: a_ = torch.randn_like(self.latent , requires_grad=__UpperCAmelCase , device=self.device) a_ = torch.optim.Adam([vector] , lr=self.lr) for i in range(self.iterations): optim.zero_grad() a_ = self._add_vector(__UpperCAmelCase) a_ = loop_post_process(__UpperCAmelCase) a_ = self._get_CLIP_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) print("CLIP loss" , __UpperCAmelCase) if self.log: wandb.log({"CLIP Loss": clip_loss}) clip_loss.backward(retain_graph=__UpperCAmelCase) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Tuple: wandb.init(reinit=__UpperCAmelCase , project="face-editor") wandb.config.update({"Positive Prompts": positive_prompts}) wandb.config.update({"Negative Prompts": negative_prompts}) wandb.config.update({"lr": self.lr, "iterations": self.iterations}) if image_path: a_ = Image.open(__UpperCAmelCase) a_ = image.resize((2_56, 2_56)) wandb.log("Original Image" , wandb.Image(__UpperCAmelCase)) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[str]: if not prompts: return [] a_ = [] a_ = [] if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = [prompt.strip() for prompt in prompts.split("|")] for prompt in prompts: if isinstance(__UpperCAmelCase , (tuple, list)): a_ = prompt[0] a_ = float(prompt[1]) elif ":" in prompt: a_ , a_ = prompt.split(":") a_ = float(__UpperCAmelCase) else: a_ = prompt a_ = 1.0 processed_prompts.append(__UpperCAmelCase) weights.append(__UpperCAmelCase) return { "prompts": processed_prompts, "weights": torch.tensor(__UpperCAmelCase , device=self.device), } def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->List[Any]: if image_path: a_ = self._get_latent(__UpperCAmelCase) else: a_ = torch.randn(self.latent_dim , device=self.device) if self.log: self._init_logging(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) assert pos_prompts, "You must provide at least one positive prompt." 
a_ = self.process_prompts(__UpperCAmelCase) a_ = self.process_prompts(__UpperCAmelCase) if save_final and save_path is None: a_ = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"])) if not os.path.exists(__UpperCAmelCase): os.makedirs(__UpperCAmelCase) else: a_ = save_path + "_" + get_timestamp() os.makedirs(__UpperCAmelCase) a_ = save_path a_ = self.vqgan.decode(self.latent)[0] if show_intermediate: print("Original Image") show_pil(custom_to_pil(__UpperCAmelCase)) a_ = loop_post_process(__UpperCAmelCase) for iter, transformed_img in enumerate(self._optimize_CLIP(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)): if show_intermediate: show_pil(__UpperCAmelCase) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''')) if self.log: wandb.log({"Image": wandb.Image(__UpperCAmelCase)}) if show_final: show_pil(__UpperCAmelCase) if save_final: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png'''))
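A hypothetical driver for the editor class above. Every identifier here (VQGANCLIP, generate, make_animation) stands in for an obfuscated name and is an assumption, as is the prompt string; the "text:weight" syntax and the '|' separator follow the prompt-processing method:

editor = VQGANCLIP()  # defaults: 10 iterations, lr=0.01, loads a local VQGAN plus CLIP ViT-B/32
editor.generate("a smiling face:1|a frowning face:-1")  # saves output under ./outputs/
editor.make_animation()  # stitches saved frames into ./animation.gif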
243
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _lowercase: Dict = get_tests_dir("fixtures") class _lowercase ( unittest.TestCase ): """simple docstring""" def UpperCamelCase_ (self ): """simple docstring""" a = mock.Mock() a = 500 a = {} a = HTTPError a = {} # Download this model to make sure it's in the cache. a = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=lowerCamelCase_ ) as mock_head: a = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ (self ): """simple docstring""" a = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class _lowercase ( unittest.TestCase ): """simple docstring""" @classmethod def UpperCamelCase_ (cls ): """simple docstring""" a = TOKEN HfFolder.save_token(lowerCamelCase_ ) @classmethod def UpperCamelCase_ (cls ): """simple docstring""" try: delete_repo(token=cls._token , repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def UpperCamelCase_ (self ): """simple docstring""" a = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase_ ) feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token ) a = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( lowerCamelCase_ , repo_id="test-feature-extractor" , push_to_hub=lowerCamelCase_ , use_auth_token=self._token ) a = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCamelCase_ (self ): """simple docstring""" a = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase_ ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token ) a = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( lowerCamelCase_ , 
repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCamelCase_ , use_auth_token=self._token ) a = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCamelCase_ (self ): """simple docstring""" CustomFeatureExtractor.register_for_auto_class() a = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , ) a = AutoFeatureExtractor.from_pretrained( F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
352
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _lowercase ( lowerCAmelCase ): """simple docstring""" __A = ["image_processor", "tokenizer"] __A = "ViTImageProcessor" __A = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ): """simple docstring""" a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , lowerCamelCase_ , ) a = kwargs.pop("feature_extractor" ) a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(lowerCamelCase_ , lowerCamelCase_ ) def __call__(self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ): """simple docstring""" if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: a = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if visual_prompt is not None: a = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if images is not None: a = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if visual_prompt is not None and images is not None: a = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: a = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: a = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ ) def UpperCamelCase_ (self , *lowerCamelCase_ , **lowerCamelCase_ ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ ) def UpperCamelCase_ (self , *lowerCamelCase_ , **lowerCamelCase_ ): """simple docstring""" return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ ) @property def UpperCamelCase_ (self ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase_ , ) return self.image_processor_class @property def UpperCamelCase_ (self ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase_ , ) return self.image_processor
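The branching in __call__ above makes text and visual prompts mutually exclusive. A call sketch, assuming a processor instance already built from the ViT image processor and CLIP tokenizer named in the class attributes (the keyword names follow CLIPSeg's processor and are an assumption here):

import numpy as np
from PIL import Image

img = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # placeholder image
enc = processor(text=["a cat"], images=img, return_tensors="pt")
print(sorted(enc.keys()))  # input_ids, attention_mask, pixel_values
enc = processor(visual_prompt=img, images=img, return_tensors="pt")
print(sorted(enc.keys()))  # conditional_pixel_values, pixel_values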
71
0
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock_times_out(tmpdir):
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_lock_filenames_are_truncated(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
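The same API outside pytest, as a minimal sketch (the lock path is illustrative):

from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("/tmp/demo.lock")
try:
    with lock.acquire(timeout=0.01):
        pass  # critical section
except Timeout:
    print("another process holds the lock")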
62
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
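Instantiating the config with its defaults for a quick sanity check (MgpstrConfig is the canonical transformers name for this class):

from transformers import MgpstrConfig

cfg = MgpstrConfig()
print(cfg.image_size, cfg.max_token_length, cfg.hidden_size)  # [32, 128] 27 768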
184
0
"""simple docstring""" from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class lowerCamelCase : '''simple docstring''' SCREAMING_SNAKE_CASE = 4_2 # [batch_size x 3] SCREAMING_SNAKE_CASE = 4_2 # [batch_size x 3] SCREAMING_SNAKE_CASE = 4_2 # [batch_size x 3] SCREAMING_SNAKE_CASE = 4_2 # [batch_size x 3] SCREAMING_SNAKE_CASE = 4_2 SCREAMING_SNAKE_CASE = 4_2 SCREAMING_SNAKE_CASE = 4_2 SCREAMING_SNAKE_CASE = 4_2 SCREAMING_SNAKE_CASE = 4_2 def _a (self ): """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def _a (self ): """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def _a (self ): """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = torch.arange(self.height * self.width ) UpperCAmelCase__ : Union[str, Any] = torch.stack( [ pixel_indices % self.width, torch.div(_lowerCamelCase , self.width , rounding_mode="""trunc""" ), ] , axis=1 , ) return coords @property def _a (self ): """simple docstring""" UpperCAmelCase__ : Any = self.shape UpperCAmelCase__ : Any = int(np.prod(_lowerCamelCase ) ) UpperCAmelCase__ : Union[str, Any] = self.get_image_coords() UpperCAmelCase__ : Optional[int] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) UpperCAmelCase__ : int = self.get_camera_rays(_lowerCamelCase ) UpperCAmelCase__ : Any = rays.view(_lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def _a (self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : str = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] UpperCAmelCase__ : str = coords.view(_lowerCamelCase , -1 , 2 ) UpperCAmelCase__ : Optional[Any] = self.resolution() UpperCAmelCase__ : List[Any] = self.fov() UpperCAmelCase__ : Union[str, Any] = (flat.float() / (res - 1)) * 2 - 1 UpperCAmelCase__ : List[Any] = fracs * torch.tan(fov / 2 ) UpperCAmelCase__ : Tuple = fracs.view(_lowerCamelCase , -1 , 2 ) UpperCAmelCase__ : List[Any] = ( self.z.view(_lowerCamelCase , 1 , 3 ) + self.x.view(_lowerCamelCase , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:] ) UpperCAmelCase__ : Any = directions / directions.norm(dim=-1 , keepdim=_lowerCamelCase ) UpperCAmelCase__ : List[Any] = torch.stack( [ torch.broadcast_to(self.origin.view(_lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_lowerCamelCase , *_lowerCamelCase , 2 , 3 ) def _a (self , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_lowerCamelCase , height=_lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , ) def a__ ( lowerCAmelCase ) -> DifferentiableProjectiveCamera: UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : Any = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): UpperCAmelCase__ : int = np.array([np.sin(lowerCAmelCase ), np.cos(lowerCAmelCase ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) UpperCAmelCase__ : List[Any] = -z * 4 UpperCAmelCase__ : Optional[int] = np.array([np.cos(lowerCAmelCase ), -np.sin(lowerCAmelCase ), 0.0] ) UpperCAmelCase__ : str = np.cross(lowerCAmelCase , lowerCAmelCase ) origins.append(lowerCAmelCase ) xs.append(lowerCAmelCase ) ys.append(lowerCAmelCase ) zs.append(lowerCAmelCase ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , width=lowerCAmelCase , height=lowerCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase )) , )
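A quick exercise of the pan-camera helper above; create_pan_cameras and camera_rays are the canonical Shap-E names assumed for the obfuscated function and property:

import torch  # the camera code above already depends on torch

cameras = create_pan_cameras(64)  # 20 views on a circle, 64x64 rays each
rays = cameras.camera_rays
print(rays.shape)  # expected: torch.Size([1, 20 * 64 * 64, 2, 3])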
355
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[2, 2, 3, 2] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=["stage2", "stage3", "stage4"] , _lowerCamelCase=3 , _lowerCamelCase=None , ): """simple docstring""" UpperCAmelCase__ : int = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Union[str, Any] = image_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Optional[int] = num_stages UpperCAmelCase__ : Optional[Any] = hidden_sizes UpperCAmelCase__ : Any = depths UpperCAmelCase__ : str = is_training UpperCAmelCase__ : Tuple = use_labels UpperCAmelCase__ : Optional[Any] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : Tuple = type_sequence_label_size UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : Tuple = out_features UpperCAmelCase__ : Dict = num_labels UpperCAmelCase__ : Tuple = scope UpperCAmelCase__ : Optional[int] = num_stages def _a (self ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : List[Any] = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Dict = self.get_config() return config, pixel_values, labels def _a (self ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def _a (self ): """simple docstring""" return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Any = UperNetForSemanticSegmentation(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() UpperCAmelCase__ : Tuple = model(_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _a (self ): """simple docstring""" 
UpperCAmelCase__ : Any = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Union[str, Any] = config_and_inputs UpperCAmelCase__ : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = (UperNetForSemanticSegmentation,) if is_torch_available() else () SCREAMING_SNAKE_CASE = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _a (self ): """simple docstring""" UpperCAmelCase__ : List[str] = UperNetModelTester(self ) UpperCAmelCase__ : Dict = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def _a (self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a (self ): """simple docstring""" return def _a (self ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : List[str] = model_class(_lowerCamelCase ) UpperCAmelCase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase__ : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a (self ): """simple docstring""" UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def _a (self ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def _a (self ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _a (self ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _a (self ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _a (self ): """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a (self ): """simple docstring""" pass def _a (self ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): UpperCAmelCase__ : List[Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): UpperCAmelCase__ : int = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) UpperCAmelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase__ : Tuple = self.model_tester.num_stages 
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : Any = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a (self ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : List[Any] = _config_zero_init(_lowerCamelCase ) UpperCAmelCase__ : List[str] = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: UpperCAmelCase__ : List[str] = model_class(config=_lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def _a (self ): """simple docstring""" pass @slow def _a (self ): """simple docstring""" for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : str = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def a__ ( ) -> List[Any]: UpperCAmelCase__ : List[str] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) UpperCAmelCase__ : str = Image.open(lowerCAmelCase ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) UpperCAmelCase__ : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = prepare_img() UpperCAmelCase__ : int = processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase ) with torch.no_grad(): UpperCAmelCase__ : Any = model(**_lowerCamelCase ) UpperCAmelCase__ : List[Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) UpperCAmelCase__ : List[str] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_lowerCamelCase ) UpperCAmelCase__ : Tuple = prepare_img() UpperCAmelCase__ : int = processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase ) with torch.no_grad(): UpperCAmelCase__ : int = model(**_lowerCamelCase ) UpperCAmelCase__ 
: str = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) UpperCAmelCase__ : int = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
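For orientation, a minimal sketch of the inference path the integration tests above exercise. The checkpoint id is the one the tests load; the local image path is a placeholder.

# Sketch of the inference path exercised by the integration tests above.
# "openmmlab/upernet-convnext-tiny" comes from the tests; "scene.jpg" is a placeholder.
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

image = Image.open("scene.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits      # shape (1, num_labels, H, W)
segmentation = logits.argmax(dim=1)[0]   # per-pixel class indices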
166
0
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np _UpperCamelCase : Any = re.compile(r"\b(a|an|the)\b", re.UNICODE) _UpperCamelCase : Union[str, Any] = None def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' ) parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' ) parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' ) parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' ) parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' ) parser.add_argument( '--na-prob-thresh' , '-t' , type=_lowerCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=_lowerCAmelCase , help='Save precision-recall curves to directory.' ) parser.add_argument('--verbose' , '-v' , action='store_true' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Tuple = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowercase__ : Optional[int] = bool(qa['answers']['text'] ) return qid_to_has_ans def a_ ( _lowerCAmelCase : Any ): '''simple docstring''' def remove_articles(_lowerCAmelCase : int ): return ARTICLES_REGEX.sub(' ' , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase : str ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase : List[Any] ): lowercase__ : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase : List[str] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' if not s: return [] return normalize_answer(_lowerCAmelCase ).split() def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ): '''simple docstring''' return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : Dict = get_tokens(_lowerCAmelCase ) lowercase__ : List[str] = get_tokens(_lowerCAmelCase ) lowercase__ : List[Any] = collections.Counter(_lowerCAmelCase ) & collections.Counter(_lowerCAmelCase ) lowercase__ : int = sum(common.values() ) if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 lowercase__ : Any = 1.0 * num_same / len(_lowerCAmelCase ) lowercase__ : Dict = 1.0 * num_same / len(_lowerCAmelCase ) lowercase__ : Any = (2 * precision * recall) / (precision + recall) return fa def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Optional[int] = {} lowercase__ : Union[str, Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowercase__ : Any = qa['id'] lowercase__ : Union[str, Any] = [t for t in qa['answers']['text'] if normalize_answer(_lowerCAmelCase )] if not gold_answers: # For unanswerable questions, only correct answer is empty string lowercase__ : Dict = [''] 
if qid not in preds: print(f"""Missing prediction for {qid}""" ) continue lowercase__ : Optional[int] = preds[qid] # Take max over all gold answers lowercase__ : int = max(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for a in gold_answers ) lowercase__ : Optional[Any] = max(compute_fa(_lowerCAmelCase , _lowerCAmelCase ) for a in gold_answers ) return exact_scores, fa_scores def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : str = {} for qid, s in scores.items(): lowercase__ : int = na_probs[qid] > na_prob_thresh if pred_na: lowercase__ : Optional[Any] = float(not qid_to_has_ans[qid] ) else: lowercase__ : Optional[Any] = s return new_scores def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None ): '''simple docstring''' if not qid_list: lowercase__ : Optional[Any] = len(_lowerCAmelCase ) return collections.OrderedDict( [ ('exact', 1_0_0.0 * sum(exact_scores.values() ) / total), ('f1', 1_0_0.0 * sum(fa_scores.values() ) / total), ('total', total), ] ) else: lowercase__ : Optional[Any] = len(_lowerCAmelCase ) return collections.OrderedDict( [ ('exact', 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total), ('f1', 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total), ('total', total), ] ) def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' for k in new_eval: lowercase__ : int = new_eval[k] def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ): '''simple docstring''' plt.step(_lowerCAmelCase , _lowerCAmelCase , color='b' , alpha=0.2 , where='post' ) plt.fill_between(_lowerCAmelCase , _lowerCAmelCase , step='post' , alpha=0.2 , color='b' ) plt.xlabel('Recall' ) plt.ylabel('Precision' ) plt.xlim([0.0, 1.0_5] ) plt.ylim([0.0, 1.0_5] ) plt.title(_lowerCAmelCase ) plt.savefig(_lowerCAmelCase ) plt.clf() def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , _lowerCAmelCase : List[str]=None ): '''simple docstring''' lowercase__ : Optional[int] = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : na_probs[k] ) lowercase__ : Tuple = 0.0 lowercase__ : List[str] = 1.0 lowercase__ : List[str] = 0.0 lowercase__ : Union[str, Any] = [1.0] lowercase__ : List[Any] = [0.0] lowercase__ : Optional[int] = 0.0 for i, qid in enumerate(_lowerCAmelCase ): if qid_to_has_ans[qid]: true_pos += scores[qid] lowercase__ : Tuple = true_pos / float(i + 1 ) lowercase__ : Union[str, Any] = true_pos / float(_lowerCAmelCase ) if i == len(_lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_lowerCAmelCase ) recalls.append(_lowerCAmelCase ) if out_image: plot_pr_curve(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return {"ap": 1_0_0.0 * avg_prec} def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): '''simple docstring''' if out_image_dir and not os.path.exists(_lowerCAmelCase ): os.makedirs(_lowerCAmelCase ) lowercase__ : List[str] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return lowercase__ : Dict = make_precision_recall_eval( _lowerCAmelCase , 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , ) lowercase__ : Tuple = make_precision_recall_eval( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , ) lowercase__ : List[Any] = {k: float(_lowerCAmelCase ) for k, v in qid_to_has_ans.items()} lowercase__ : Any = make_precision_recall_eval( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'pr_exact' ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'pr_f1' ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'pr_oracle' ) def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' if not qid_list: return lowercase__ : List[str] = [na_probs[k] for k in qid_list] lowercase__ : Tuple = np.ones_like(_lowerCAmelCase ) / float(len(_lowerCAmelCase ) ) plt.hist(_lowerCAmelCase , weights=_lowerCAmelCase , bins=20 , range=(0.0, 1.0) ) plt.xlabel('Model probability of no-answer' ) plt.ylabel('Proportion of dataset' ) plt.title(f"""Histogram of no-answer probability: {name}""" ) plt.savefig(os.path.join(_lowerCAmelCase , f"""na_prob_hist_{name}.png""" ) ) plt.clf() def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' lowercase__ : Tuple = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) lowercase__ : int = num_no_ans lowercase__ : Optional[int] = cur_score lowercase__ : Tuple = 0.0 lowercase__ : Dict = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : na_probs[k] ) for i, qid in enumerate(_lowerCAmelCase ): if qid not in scores: continue if qid_to_has_ans[qid]: lowercase__ : Optional[int] = scores[qid] else: if preds[qid]: lowercase__ : List[Any] = -1 else: lowercase__ : Optional[int] = 0 cur_score += diff if cur_score > best_score: lowercase__ : Dict = cur_score lowercase__ : Optional[int] = na_probs[qid] return 1_0_0.0 * best_score / len(_lowerCAmelCase ), best_thresh def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str ): '''simple docstring''' lowercase__ , lowercase__ : List[Any] = find_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowercase__ , lowercase__ : Dict = find_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Any = best_exact lowercase__ : Tuple = exact_thresh lowercase__ : Optional[Any] = best_fa lowercase__ : Any = fa_thresh def a_ ( ): '''simple docstring''' with open(OPTS.data_file ) as f: lowercase__ : List[Any] = json.load(_lowerCAmelCase ) lowercase__ : Union[str, Any] = dataset_json['data'] with open(OPTS.pred_file ) as f: lowercase__ : str = json.load(_lowerCAmelCase ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: lowercase__ : Union[str, Any] = json.load(_lowerCAmelCase ) else: lowercase__ : str = {k: 0.0 for k in preds} lowercase__ : int = make_qid_to_has_ans(_lowerCAmelCase ) # maps qid to True/False lowercase__ : List[str] = [k for k, v in 
qid_to_has_ans.items() if v] lowercase__ : Any = [k for k, v in qid_to_has_ans.items() if not v] lowercase__ , lowercase__ : Any = get_raw_scores(_lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Optional[Any] = apply_no_ans_threshold(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.na_prob_thresh ) lowercase__ : Union[str, Any] = apply_no_ans_threshold(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.na_prob_thresh ) lowercase__ : Tuple = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase ) if has_ans_qids: lowercase__ : int = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase , qid_list=_lowerCAmelCase ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'HasAns' ) if no_ans_qids: lowercase__ : Optional[Any] = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase , qid_list=_lowerCAmelCase ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'NoAns' ) if OPTS.na_prob_file: find_all_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir ) histogram_na_prob(_lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir , 'hasAns' ) histogram_na_prob(_lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir , 'noAns' ) if OPTS.out_file: with open(OPTS.out_file , 'w' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) else: print(json.dumps(_lowerCAmelCase , indent=2 ) ) if __name__ == "__main__": _UpperCamelCase : Optional[int] = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt main()
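A self-contained sanity check of the token-overlap F1 the evaluation script computes above; readable names are used here, while the script's own aliases are left as they are.

# Token-bag F1, as in the script's compute step, on a toy gold/prediction pair.
import collections

def token_f1(gold: str, pred: str) -> float:
    gold_toks, pred_toks = gold.split(), pred.split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

print(token_f1("the cat sat", "cat sat down"))  # precision = recall = 2/3 -> 0.666...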
77
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowerCAmelCase_ ( __A, __A=False ) -> Any: '''simple docstring''' try: UpperCAmelCase__ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCAmelCase__ = default else: # KEY is set, convert it to True or False. try: UpperCAmelCase__ = strtobool(__A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"""If set, {key} must be yes or no.""" ) return _value UpperCamelCase__ = parse_flag_from_env('RUN_SLOW', default=False) UpperCamelCase__ = parse_flag_from_env('RUN_REMOTE', default=False) UpperCamelCase__ = parse_flag_from_env('RUN_LOCAL', default=True) UpperCamelCase__ = parse_flag_from_env('RUN_PACKAGED', default=True) # Compression UpperCamelCase__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4') UpperCamelCase__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr') UpperCamelCase__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard') # Audio UpperCamelCase__ = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'), reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ', ) # Beam UpperCamelCase__ = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'), reason='test requires apache-beam and a compatible dill version', ) # Dill-cloudpickle compatibility UpperCamelCase__ = pytest.mark.skipif( config.DILL_VERSION <= version.parse('0.3.2'), reason='test requires dill>0.3.2 for cloudpickle compatibility', ) # Windows UpperCamelCase__ = pytest.mark.skipif( sys.platform == 'win32', reason='test should not be run on Windows', ) def lowerCAmelCase_ ( __A ) -> Any: '''simple docstring''' try: import faiss # noqa except ImportError: UpperCAmelCase__ = unittest.skip("test requires faiss" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> Optional[Any]: '''simple docstring''' try: import regex # noqa except ImportError: UpperCAmelCase__ = unittest.skip("test requires regex" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> List[str]: '''simple docstring''' try: import elasticsearch # noqa except ImportError: UpperCAmelCase__ = unittest.skip("test requires elasticsearch" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> List[Any]: '''simple docstring''' try: import sqlalchemy # noqa except ImportError: UpperCAmelCase__ = unittest.skip("test requires sqlalchemy" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> List[str]: '''simple docstring''' if not config.TORCH_AVAILABLE: UpperCAmelCase__ = unittest.skip("test requires PyTorch" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> Union[str, Any]: '''simple docstring''' if not config.TF_AVAILABLE: UpperCAmelCase__ = unittest.skip("test requires TensorFlow" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> Any: '''simple docstring''' if not 
config.JAX_AVAILABLE: UpperCAmelCase__ = unittest.skip("test requires JAX" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> int: '''simple docstring''' if not config.PIL_AVAILABLE: UpperCAmelCase__ = unittest.skip("test requires Pillow" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> Tuple: '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers" )(__A ) else: return test_case def lowerCAmelCase_ ( __A ) -> Dict: '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken" )(__A ) else: return test_case def lowerCAmelCase_ ( __A ) -> Optional[Any]: '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy" )(__A ) else: return test_case def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' def _require_spacy_model(__A ): try: import spacy # noqa F401 spacy.load(__A ) except ImportError: return unittest.skip("test requires spacy" )(__A ) except OSError: return unittest.skip("test requires spacy model '{}'".format(__A ) )(__A ) else: return test_case return _require_spacy_model def lowerCAmelCase_ ( __A ) -> Optional[Any]: '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark" )(__A ) else: return test_case def lowerCAmelCase_ ( __A ) -> Tuple: '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark" )(__A ) else: return test_case def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: UpperCAmelCase__ = unittest.skip("test is slow" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> List[Any]: '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: UpperCAmelCase__ = unittest.skip("test is local" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> Optional[Any]: '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: UpperCAmelCase__ = unittest.skip("test is packaged" )(__A ) return test_case def lowerCAmelCase_ ( __A ) -> Any: '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: UpperCAmelCase__ = unittest.skip("test requires remote" )(__A ) return test_case def lowerCAmelCase_ ( *__A ) -> Optional[int]: '''simple docstring''' def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(__A ) and name.startswith("test" ): for decorator in decorators: UpperCAmelCase__ = decorator(__A ) setattr(cls, __A, __A ) return cls return decorate class A ( UpperCAmelCase_ ): pass class A ( UpperCAmelCase_ ): __UpperCAmelCase : Union[str, Any] = 0 __UpperCAmelCase : str = 1 __UpperCAmelCase : int = 2 @contextmanager def lowerCAmelCase_ ( __A=OfflineSimulationMode.CONNECTION_FAILS, __A=1e-16 ) -> List[str]: '''simple docstring''' UpperCAmelCase__ = requests.Session().request def timeout_request(__A, __A, __A, **__A ): # Change the url to an invalid url so that the connection hangs UpperCAmelCase__ = "https://10.255.255.1" if kwargs.get("timeout" ) is None: raise RequestWouldHangIndefinitelyError( f"""Tried a call to {url} in offline mode with no timeout set. 
Please set a timeout.""" ) UpperCAmelCase__ = timeout try: return online_request(__A, __A, **__A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier UpperCAmelCase__ = url UpperCAmelCase__ = e.args[0] UpperCAmelCase__ = (max_retry_error.args[0].replace("10.255.255.1", f"""OfflineMock[{url}]""" ),) UpperCAmelCase__ = (max_retry_error,) raise def raise_connection_error(__A, __A, **__A ): raise requests.ConnectionError("Offline mode is enabled.", request=__A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send", __A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request", __A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE", __A ): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum." ) @contextmanager def lowerCAmelCase_ ( *__A, **__A ) -> str: '''simple docstring''' UpperCAmelCase__ = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__A, **__A ) as tmp_dir: try: os.chdir(__A ) yield finally: os.chdir(__A ) @contextmanager def lowerCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' import gc gc.collect() UpperCAmelCase__ = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowerCAmelCase_ ( ) -> List[str]: '''simple docstring''' import gc gc.collect() UpperCAmelCase__ = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def lowerCAmelCase_ ( __A, __A ) -> List[str]: '''simple docstring''' return deepcopy(__A ).integers(0, 100, 10 ).tolist() == deepcopy(__A ).integers(0, 100, 10 ).tolist() def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(__A, *__A, **__A ): try: return func(*__A, **__A ) except HTTPError as err: if str(__A ).startswith("500" ) or str(__A ).startswith("502" ): pytest.xfail(str(__A ) ) raise err return decorator.decorator(_wrapper, __A ) class A : def __init__(self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : List[str] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = returncode UpperCAmelCase__ = stdout UpperCAmelCase__ = stderr async def lowerCAmelCase_ ( __A, __A ) -> Optional[int]: '''simple docstring''' while True: UpperCAmelCase__ = await stream.readline() if line: callback(__A ) else: break async def lowerCAmelCase_ ( __A, __A=None, __A=None, __A=None, __A=False, __A=False ) -> _RunOutput: '''simple docstring''' if echo: print("\nRunning: ", " ".join(__A ) ) UpperCAmelCase__ = await asyncio.create_subprocess_exec( cmd[0], *cmd[1:], stdin=__A, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=__A, ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCAmelCase__ = [] UpperCAmelCase__ = [] def tee(__A, __A, __A, __A="" ): UpperCAmelCase__ = line.decode("utf-8" ).rstrip() sink.append(__A ) if not quiet: print(__A, __A, file=__A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout, lambda __A : tee(__A, __A, sys.stdout, label="stdout:" ) ), _read_stream(p.stderr, lambda __A : tee(__A, __A, sys.stderr, label="stderr:" ) ), ], timeout=__A, ) return _RunOutput(await p.wait(), __A, __A ) def lowerCAmelCase_ ( __A, __A=None, __A=None, __A=180, __A=False, __A=True ) -> _RunOutput: '''simple docstring''' UpperCAmelCase__ = asyncio.get_event_loop() UpperCAmelCase__ = loop.run_until_complete( _stream_subprocess(__A, env=__A, stdin=__A, timeout=__A, quiet=__A, echo=__A ) ) UpperCAmelCase__ = " ".join(__A ) if result.returncode > 0: UpperCAmelCase__ = "\n".join(result.stderr ) raise RuntimeError( f"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" f"""The combined stderr from workers follows:\n{stderr}""" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"""'{cmd_str}' produced no output.""" ) return result def lowerCAmelCase_ ( ) -> Tuple: '''simple docstring''' UpperCAmelCase__ = os.environ.get("PYTEST_XDIST_WORKER", "gw0" ) UpperCAmelCase__ = re.sub(r"^gw", "", __A, 0, re.M ) return int(__A ) def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase__ = 29_500 UpperCAmelCase__ = pytest_xdist_worker_id() return port + uniq_delta
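A hedged sketch of how the offline simulator defined above is typically driven from a test. The context manager is assumed to be exported under its conventional name `offline`; the dump above renames it.

# Hypothetical test using the offline simulator above; `offline` and
# `OfflineSimulationMode` are assumed importable under these names.
import pytest
import requests

def test_network_calls_fail_offline():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.Session().request("GET", "https://huggingface.co")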
65
0
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
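The same conversion can be driven programmatically. The two input paths below are placeholders for whatever checkpoint and YAML config you actually have; they are not files this script ships with.

# Programmatic equivalent of the CLI above; both input paths are placeholders.
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_controlnet_from_original_ckpt,
)

controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="control_sd15_canny.pth",   # placeholder
    original_config_file="cldm_v15.yaml",       # placeholder
    image_size=512,
    extract_ema=False,
    from_safetensors=False,
)
controlnet.save_pretrained("controlnet-converted", safe_serialization=True)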
368
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
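A short usage sketch for the pipeline above. "harmonai/maestro-150k" is a public Dance Diffusion checkpoint and is an assumption of this example, not something the file itself references.

# Usage sketch; the checkpoint id is an assumption of this example.
import torch
from diffusers import DanceDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to(device)

output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
waveform = output.audios[0]  # numpy array of shape (channels, samples)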
262
0
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given by magnitude and direction into (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment of the forces about the origin is (numerically) zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
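One more check that is easy to verify by hand, reusing the helpers above: a single force applied off the pivot produces a net moment, so the equilibrium test must fail.

# A 100 N downward force applied 2 m from the origin yields a net moment of
# -200 N*m, well above eps, so this configuration is not in equilibrium.
unbalanced_force = array([[0.0, -100.0]])
lever_arm = array([[2.0, 0.0]])
assert not in_static_equilibrium(unbalanced_force, lever_arm)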
73
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : """simple docstring""" def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ): '''simple docstring''' A_ : str = parent A_ : str = batch_size A_ : str = seq_length A_ : Any = is_training A_ : Any = use_input_mask A_ : str = use_token_type_ids A_ : Tuple = use_labels A_ : Optional[Any] = vocab_size A_ : Dict = hidden_size A_ : str = num_hidden_layers A_ : Dict = num_attention_heads A_ : str = intermediate_size A_ : int = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : List[Any] = type_vocab_size A_ : Any = type_sequence_label_size A_ : Dict = initializer_range A_ : Any = num_labels A_ : Optional[int] = num_choices A_ : Optional[Any] = scope A_ : Any = range_bbox def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ : str = bbox[i, j, 3] A_ : Union[str, Any] = bbox[i, j, 1] A_ : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: A_ : Any = bbox[i, j, 2] A_ : Tuple = bbox[i, j, 0] A_ : int = t A_ : int = tf.convert_to_tensor(snake_case ) A_ : Any = None if self.use_input_mask: A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : str = None if self.use_token_type_ids: A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Dict = None A_ : List[Any] = None A_ : List[str] = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : str = ids_tensor([self.batch_size] , self.num_choices ) A_ : int = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ): '''simple docstring''' A_ : Any = TFLayoutLMModel(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) A_ : str = model(snake_case , snake_case , token_type_ids=snake_case ) A_ : List[Any] = model(snake_case , snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ): '''simple docstring''' A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Union[str, Any] = self.num_labels A_ : int = TFLayoutLMForSequenceClassification(config=snake_case ) A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.num_labels A_ : str = TFLayoutLMForTokenClassification(config=snake_case ) A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case ) A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Optional[Any] = { "input_ids": 
input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 10 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Tuple = TFLayoutLMModelTester(self ) A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def __snake_case ( ) -> Optional[Any]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 A_ : Union[str, Any] = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs() # forward pass A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the sequence output on [0, :3, :3] A_ : List[Any] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) ) # test the pooled output on [1, :3] A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs() # forward pass A_ : Dict = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar A_ : List[str] = outputs.loss A_ : Union[str, Any] = (2,) self.assertEqual(loss.shape , snake_case ) # test the shape of the logits A_ : Tuple = outputs.logits A_ : Tuple = (2, 2) self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) # test the shape of the logits A_ : Dict = outputs.logits A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) ) 
self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the shape of the logits A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , snake_case ) self.assertEqual(outputs.end_logits.shape , snake_case )
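For orientation, a minimal forward pass matching the shapes the integration tests above assert. The token ids and boxes are illustrative; the all-zero and all-1000 boxes mirror the [CLS]/[SEP] convention visible in the test tensors above.

# Minimal TFLayoutLMModel forward pass; token ids and boxes are illustrative.
import tensorflow as tf
from transformers import TFLayoutLMModel

model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

input_ids = tf.constant([[101, 7592, 102]])  # roughly: [CLS] hello [SEP]
bbox = tf.constant([[[0, 0, 0, 0], [100, 80, 220, 110], [1000, 1000, 1000, 1000]]])

outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # (1, 3, 768)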
300
0
'''simple docstring'''

INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following command.
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
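The placeholder map above is consumed by the documentation tooling when formatting code samples; a toy substitution below illustrates the mapping (the snippet string is invented for this example).

# Toy illustration of applying black_avoid_patterns; the snippet is made up.
snippet = "processor = {processor_class}.from_pretrained(checkpoint)"
for placeholder, replacement in black_avoid_patterns.items():
    snippet = snippet.replace(placeholder, replacement)
print(snippet)  # processor = FakeProcessorClass.from_pretrained(checkpoint)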
243
'''simple docstring'''

# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# silence TensorFlow's C++ logging before any TF import below
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
243
1
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
53
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
53
1
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean lowercase__ :Any = 0 lowercase__ :List[str] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase__ :Optional[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right lowercase__ :List[Any] = tuple[int, int] class lowercase : def __init__( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,): lowercase = pos_x lowercase = pos_y lowercase = (pos_y, pos_x) lowercase = goal_x lowercase = goal_y lowercase = g_cost lowercase = parent lowercase = self.calculate_heuristic() lowercase = self.g_cost + self.h_cost def A__ ( self): lowercase = self.pos_x - self.goal_x lowercase = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(a_) + abs(a_) else: return sqrt(dy**2 + dx**2) def __lt__( self ,A__): return self.f_cost < other.f_cost class lowercase : def __init__( self ,A__ ,A__): lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,a_) lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_9_9_9_9 ,a_) lowercase = [self.start] lowercase = [] lowercase = False def A__ ( self): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowercase = self.open_nodes.pop(0) if current_node.pos == self.target.pos: return self.retrace_path(a_) self.closed_nodes.append(a_) lowercase = self.get_successors(a_) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(a_) else: # retrieve the best current path lowercase = self.open_nodes.pop(self.open_nodes.index(a_)) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(a_) else: self.open_nodes.append(a_) return [self.start.pos] def A__ ( self ,A__): lowercase = [] for action in delta: lowercase = parent.pos_x + action[1] lowercase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(a_) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( a_ ,a_ ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,a_ ,)) return successors def A__ ( self ,A__): lowercase = node lowercase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) lowercase = current_node.parent path.reverse() return path class lowercase : def __init__( self ,A__ ,A__): lowercase = AStar(a_ ,a_) lowercase = AStar(a_ ,a_) lowercase = False def A__ ( self): while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() lowercase = self.fwd_astar.open_nodes.pop(0) lowercase = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( a_ ,a_) self.fwd_astar.closed_nodes.append(a_) self.bwd_astar.closed_nodes.append(a_) lowercase = current_bwd_node lowercase = current_fwd_node lowercase = { self.fwd_astar: self.fwd_astar.get_successors(a_), self.bwd_astar: self.bwd_astar.get_successors(a_), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(a_) else: # retrieve the best current path lowercase = astar.open_nodes.pop( astar.open_nodes.index(a_)) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(a_) else: 
astar.open_nodes.append(a_) return [self.fwd_astar.start.pos] def A__ ( self ,A__ ,A__): lowercase = self.fwd_astar.retrace_path(a_) lowercase = self.bwd_astar.retrace_path(a_) bwd_path.pop() bwd_path.reverse() lowercase = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] lowercase__ :Tuple = (0, 0) lowercase__ :List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowercase__ :List[str] = time.time() lowercase__ :str = AStar(init, goal) lowercase__ :Tuple = a_star.search() lowercase__ :int = time.time() - start_time print(F'AStar execution time = {end_time:f} seconds') lowercase__ :Dict = time.time() lowercase__ :List[str] = BidirectionalAStar(init, goal) lowercase__ :Tuple = time.time() - bd_start_time print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
352
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
97
0
import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        self.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
334
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa" _UpperCamelCase : Dict = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) _UpperCamelCase : Optional[int] = "document_qa" _UpperCamelCase : Any = AutoProcessor _UpperCamelCase : Union[str, Any] = VisionEncoderDecoderModel _UpperCamelCase : Union[str, Any] = ["image", "text"] _UpperCamelCase : List[str] = ["text"] def __init__( self , *a__ , **a__ ): if not is_vision_available(): raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" ) super().__init__(*a__ , **a__ ) def __A ( self , a__ , a__ ): _lowerCAmelCase : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" _lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , a__ ) _lowerCAmelCase : str = self.pre_processor.tokenizer( a__ , add_special_tokens=a__ , return_tensors="""pt""" ).input_ids _lowerCAmelCase : Dict = self.pre_processor(a__ , return_tensors="""pt""" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __A ( self , a__ ): return self.model.generate( inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a__ , ).sequences def __A ( self , a__ ): _lowerCAmelCase : Tuple = self.pre_processor.batch_decode(a__ )[0] _lowerCAmelCase : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" ) _lowerCAmelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" ) _lowerCAmelCase : List[str] = re.sub(r"""<.*?>""" , """""" , a__ , count=1 ).strip() # remove first task start token _lowerCAmelCase : List[str] = self.pre_processor.tokenajson(a__ ) return sequence["answer"]
44
0
def is_palindrome(n: int | str) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
301
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
301
1
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
99
'''simple docstring'''
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
239
0
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
42
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
42
1
"""simple docstring""" import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase__ = logging.getLogger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = np.argmax(lowercase ,axis=1 ) return np.sum(outputs == labels ) def __UpperCAmelCase ( lowercase ): """simple docstring""" with open(lowercase ,encoding="""utf_8""" ) as f: _UpperCAmelCase = csv.reader(lowercase ) _UpperCAmelCase = [] next(lowercase ) # skip the first line for line in tqdm(lowercase ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [] for dataset in encoded_datasets: _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa ) _UpperCAmelCase = np.zeros((n_batch, 2) ,dtype=np.intaa ) _UpperCAmelCase = np.full((n_batch, 2, input_len) ,fill_value=-1_00 ,dtype=np.intaa ) _UpperCAmelCase = np.zeros((n_batch,) ,dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(lowercase ): _UpperCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _UpperCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _UpperCAmelCase = with_conta _UpperCAmelCase = with_conta _UpperCAmelCase = len(lowercase ) - 1 _UpperCAmelCase = len(lowercase ) - 1 _UpperCAmelCase = with_conta _UpperCAmelCase = with_conta _UpperCAmelCase = mc_label _UpperCAmelCase = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(lowercase ) for t in all_inputs ) ) return tensor_datasets def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--model_name""" ,type=lowercase ,default="""openai-gpt""" ,help="""pretrained model name""" ) parser.add_argument("""--do_train""" ,action="""store_true""" ,help="""Whether to run training.""" ) parser.add_argument("""--do_eval""" ,action="""store_true""" ,help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""" ,default=lowercase ,type=lowercase ,required=lowercase ,help="""The output directory where the model predictions and checkpoints will be written.""" ,) parser.add_argument("""--train_dataset""" ,type=lowercase ,default="""""" ) parser.add_argument("""--eval_dataset""" ,type=lowercase ,default="""""" ) parser.add_argument("""--seed""" ,type=lowercase ,default=42 ) parser.add_argument("""--num_train_epochs""" ,type=lowercase ,default=3 ) parser.add_argument("""--train_batch_size""" ,type=lowercase ,default=8 ) parser.add_argument("""--eval_batch_size""" ,type=lowercase ,default=16 ) parser.add_argument("""--adam_epsilon""" ,default=1E-8 ,type=lowercase ,help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" ,type=lowercase ,default=1 ) parser.add_argument( """--max_steps""" ,default=-1 ,type=lowercase ,help=( """If > 0: set total number of training 
steps to perform. Override num_train_epochs.""" ) ,)
    parser.add_argument(
        """--gradient_accumulation_steps""" ,type=lowercase ,default=1 ,help="""Number of updates steps to accumulate before performing a backward/update pass.""" ,)
    parser.add_argument("""--learning_rate""" ,type=lowercase ,default=6.25E-5 )
    parser.add_argument("""--warmup_steps""" ,default=0 ,type=lowercase ,help="""Linear warmup over warmup_steps.""" )
    parser.add_argument("""--lr_schedule""" ,type=lowercase ,default="""warmup_linear""" )
    parser.add_argument("""--weight_decay""" ,type=lowercase ,default=0.01 )
    parser.add_argument("""--lm_coef""" ,type=lowercase ,default=0.9 )
    parser.add_argument("""--n_valid""" ,type=lowercase ,default=3_74 )
    parser.add_argument("""--server_ip""" ,type=lowercase ,default="""""" ,help="""Can be used for distant debugging.""" )
    parser.add_argument("""--server_port""" ,type=lowercase ,default="""""" ,help="""Can be used for distant debugging.""" )
    _UpperCAmelCase = parser.parse_args()
    print(lowercase )

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=lowercase )
        ptvsd.wait_for_attach()

    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )

    _UpperCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    _UpperCAmelCase = torch.cuda.device_count()
    logger.info("""device: {}, n_gpu {}""".format(lowercase ,lowercase ) )

    if not args.do_train and not args.do_eval:
        raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )

    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    _UpperCAmelCase = ["""_start_""", """_delimiter_""", """_classify_"""]
    _UpperCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(lowercase )
    _UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase )
    _UpperCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(lowercase ) )
    model.to(lowercase )

    # Load and encode the datasets
    def tokenize_and_encode(lowercase ):
        if isinstance(lowercase ,lowercase ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowercase ) )
        elif isinstance(lowercase ,lowercase ):
            return obj
        return [tokenize_and_encode(lowercase ) for o in obj]

    logger.info("""Encoding dataset...""" )
    _UpperCAmelCase = load_rocstories_dataset(args.train_dataset )
    _UpperCAmelCase = load_rocstories_dataset(args.eval_dataset )
    _UpperCAmelCase = (train_dataset, eval_dataset)
    _UpperCAmelCase = tokenize_and_encode(lowercase )

    # Compute the max input length for the Transformer
    _UpperCAmelCase = model.config.n_positions // 2 - 2
    _UpperCAmelCase = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    _UpperCAmelCase = min(lowercase ,model.config.n_positions )  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    _UpperCAmelCase = pre_process_datasets(lowercase ,lowercase ,lowercase ,*lowercase )
    _UpperCAmelCase , _UpperCAmelCase = tensor_datasets[0], tensor_datasets[1]

    _UpperCAmelCase = TensorDataset(*lowercase )
    _UpperCAmelCase = RandomSampler(lowercase )
    _UpperCAmelCase = DataLoader(lowercase ,sampler=lowercase ,batch_size=args.train_batch_size )

    _UpperCAmelCase = TensorDataset(*lowercase )
    _UpperCAmelCase = SequentialSampler(lowercase )
    _UpperCAmelCase = DataLoader(lowercase ,sampler=lowercase ,batch_size=args.eval_batch_size )

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            _UpperCAmelCase = args.max_steps
            _UpperCAmelCase = args.max_steps // (len(lowercase ) // args.gradient_accumulation_steps) + 1
        else:
            _UpperCAmelCase = len(lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs

        _UpperCAmelCase = list(model.named_parameters() )
        _UpperCAmelCase = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
        _UpperCAmelCase = [
            {
                """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                """weight_decay""": args.weight_decay,
            },
            {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
        ]
        _UpperCAmelCase = AdamW(lowercase ,lr=args.learning_rate ,eps=args.adam_epsilon )
        _UpperCAmelCase = get_linear_schedule_with_warmup(
            lowercase ,num_warmup_steps=args.warmup_steps ,num_training_steps=lowercase )

    if args.do_train:
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) ,desc="""Epoch""" ):
            _UpperCAmelCase = 0
            _UpperCAmelCase = 0
            _UpperCAmelCase = tqdm(lowercase ,desc="""Training""" )
            for step, batch in enumerate(lowercase ):
                _UpperCAmelCase = tuple(t.to(lowercase ) for t in batch )
                _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = batch
                _UpperCAmelCase = model(lowercase ,mc_token_ids=lowercase ,lm_labels=lowercase ,mc_labels=lowercase )
                _UpperCAmelCase = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                _UpperCAmelCase = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                _UpperCAmelCase = """Training loss: {:.2e} lr: {:.2e}""".format(lowercase ,scheduler.get_lr()[0] )

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        _UpperCAmelCase = model.module if hasattr(lowercase ,"""module""" ) else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        _UpperCAmelCase = os.path.join(args.output_dir ,lowercase )
        _UpperCAmelCase = os.path.join(args.output_dir ,lowercase )

        torch.save(model_to_save.state_dict() ,lowercase )
        model_to_save.config.to_json_file(lowercase )
        tokenizer.save_vocabulary(args.output_dir )

        # Load a trained model and vocabulary that you have fine-tuned
        _UpperCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        _UpperCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(lowercase )

    if args.do_eval:
        model.eval()
        _UpperCAmelCase , _UpperCAmelCase = 0, 0
        _UpperCAmelCase , _UpperCAmelCase = 0, 0
        for batch in tqdm(lowercase ,desc="""Evaluating""" ):
            _UpperCAmelCase = tuple(t.to(lowercase ) for t in batch )
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = batch
            with torch.no_grad():
                _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model(
                    lowercase ,mc_token_ids=lowercase ,lm_labels=lowercase ,mc_labels=lowercase )

            _UpperCAmelCase = mc_logits.detach().cpu().numpy()
            _UpperCAmelCase = mc_labels.to("""cpu""" ).numpy()
            _UpperCAmelCase = accuracy(lowercase ,lowercase )

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1

        _UpperCAmelCase = eval_loss / nb_eval_steps
        _UpperCAmelCase = eval_accuracy / nb_eval_examples
        _UpperCAmelCase = tr_loss / nb_tr_steps if args.do_train else None
        _UpperCAmelCase = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}

        _UpperCAmelCase = os.path.join(args.output_dir ,"""eval_results.txt""" )
        with open(lowercase ,"""w""" ) as writer:
            logger.info("""***** Eval results *****""" )
            for key in sorted(result.keys() ):
                logger.info("""  %s = %s""" ,lowercase ,str(result[key] ) )
                writer.write("""%s = %s\n""" % (key, str(result[key] )) )


if __name__ == "__main__":
    main()
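# --- Usage sketch (added; not part of the original row) ---
# A minimal sketch of how this ROCStories fine-tuning script would be invoked,
# assuming it is saved as run_openai_gpt.py; the dataset paths below are
# illustrative placeholders, not taken from the source:
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset path/to/cloze_test_val__spring2016.csv \
#     --eval_dataset path/to/cloze_test_test__spring2016.csv \
#     --output_dir ./gpt_rocstories \
#     --train_batch_size 16 \
#     --num_train_epochs 3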
289
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) 
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Tuple = state_dict.pop(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = val def a__ ( _SCREAMING_SNAKE_CASE : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Dict = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCAmelCase_ : Optional[int] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) UpperCAmelCase_ : Union[str, Any] = value else: UpperCAmelCase_ : int = value return new_state_dict def a__ ( _SCREAMING_SNAKE_CASE : List[str] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCAmelCase_ : str = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : List[Any] = in_proj_weight[:2_56, :] UpperCAmelCase_ : Optional[int] = in_proj_bias[:2_56] UpperCAmelCase_ : Dict = in_proj_weight[2_56:5_12, :] UpperCAmelCase_ : Dict = in_proj_bias[2_56:5_12] UpperCAmelCase_ : int = in_proj_weight[-2_56:, :] UpperCAmelCase_ : Dict = in_proj_bias[-2_56:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) UpperCAmelCase_ : str = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Union[str, Any] = in_proj_weight[:2_56, :] UpperCAmelCase_ : Optional[int] = in_proj_bias[:2_56] 
UpperCAmelCase_ : Optional[Any] = in_proj_weight[2_56:5_12, :] UpperCAmelCase_ : List[str] = in_proj_bias[2_56:5_12] UpperCAmelCase_ : Optional[int] = in_proj_weight[-2_56:, :] UpperCAmelCase_ : List[Any] = in_proj_bias[-2_56:] # read in weights + bias of input projection layer of cross-attention UpperCAmelCase_ : int = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) UpperCAmelCase_ : Union[str, Any] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict UpperCAmelCase_ : List[str] = in_proj_weight_cross_attn[:2_56, :] UpperCAmelCase_ : Dict = in_proj_bias_cross_attn[:2_56] UpperCAmelCase_ : List[Any] = in_proj_weight_cross_attn[2_56:5_12, :] UpperCAmelCase_ : int = in_proj_bias_cross_attn[2_56:5_12] UpperCAmelCase_ : int = in_proj_weight_cross_attn[-2_56:, :] UpperCAmelCase_ : str = in_proj_bias_cross_attn[-2_56:] def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ) -> Any: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image.size UpperCAmelCase_ : int = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = 8_00 if "detection" in checkpoint_url else 10_00 UpperCAmelCase_ : str = target_max_size / current_max_size UpperCAmelCase_ : Tuple = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = F.to_tensor(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = F.normalize(_SCREAMING_SNAKE_CASE , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" logger.info("Converting model..." ) # load original state dict UpperCAmelCase_ : Union[str, Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = rename_backbone_keys(_SCREAMING_SNAKE_CASE ) # query, key and value matrices need special treatment read_in_q_k_v(_SCREAMING_SNAKE_CASE ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCAmelCase_ : Any = "model." 
for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): UpperCAmelCase_ : Optional[Any] = state_dict.pop(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = val # create HuggingFace model and load state dict UpperCAmelCase_ : str = TableTransformerConfig( backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: UpperCAmelCase_ : str = 15 UpperCAmelCase_ : str = 2 UpperCAmelCase_ : Union[str, Any] = {0: "table", 1: "table rotated"} UpperCAmelCase_ : Tuple = idalabel UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()} else: UpperCAmelCase_ : Tuple = 1_25 UpperCAmelCase_ : Tuple = 6 UpperCAmelCase_ : Union[str, Any] = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } UpperCAmelCase_ : str = idalabel UpperCAmelCase_ : Any = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : List[Any] = DetrImageProcessor( format="coco_detection" , max_size=8_00 if "detection" in checkpoint_url else 10_00 ) UpperCAmelCase_ : Optional[int] = TableTransformerForObjectDetection(_SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.eval() # verify our conversion UpperCAmelCase_ : Optional[Any] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" UpperCAmelCase_ : Dict = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = Image.open(_SCREAMING_SNAKE_CASE ).convert("RGB" ) UpperCAmelCase_ : int = normalize(resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ).unsqueeze(0 ) UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ) if "detection" in checkpoint_url: UpperCAmelCase_ : Any = (1, 15, 3) UpperCAmelCase_ : Optional[int] = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) UpperCAmelCase_ : Dict = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: UpperCAmelCase_ : Union[str, Any] = (1, 1_25, 7) UpperCAmelCase_ : List[str] = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) UpperCAmelCase_ : Any = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..." 
) UpperCAmelCase_ : List[str] = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(_SCREAMING_SNAKE_CASE ) image_processor.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _lowerCamelCase = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
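# --- Usage sketch (added; not part of the original row) ---
# Converting the detection checkpoint with the argparse interface defined above;
# the checkpoint URL is one of the two choices registered on --checkpoint_url,
# while the script filename and dump path below are illustrative placeholders:
#
#   python convert_table_transformer_checkpoint.py \
#     --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#     --pytorch_dump_folder_path ./table-transformer-detection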
67
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class _snake_case (__SCREAMING_SNAKE_CASE): def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = tempfile.mkdtemp() UpperCAmelCase_ : Optional[int] = 8 # DPR tok UpperCAmelCase_ : Optional[Any] = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : Any = os.path.join(self.tmpdirname ,"dpr_tokenizer" ) os.makedirs(_snake_case ,exist_ok=_snake_case ) UpperCAmelCase_ : List[str] = os.path.join(_snake_case ,DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok UpperCAmelCase_ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] UpperCAmelCase_ : str = dict(zip(_snake_case ,range(len(_snake_case ) ) ) ) UpperCAmelCase_ : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCAmelCase_ : Optional[int] = {"unk_token": "<unk>"} UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname ,"bart_tokenizer" ) os.makedirs(_snake_case ,exist_ok=_snake_case ) UpperCAmelCase_ : Any = os.path.join(_snake_case ,BART_VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ : Union[str, Any] = os.path.join(_snake_case ,BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(_snake_case ) + "\n" ) with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp: fp.write("\n".join(_snake_case ) ) def UpperCamelCase__ ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"dpr_tokenizer" ) ) def UpperCamelCase__ ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"dpr_tokenizer" ) ) def UpperCamelCase__ ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"bart_tokenizer" ) ) def UpperCamelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" ,string_factory="Flat" ,metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = 
self.get_dummy_dataset() UpperCAmelCase_ : Optional[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: UpperCAmelCase_ : List[Any] = dataset UpperCAmelCase_ : Any = RagRetriever( _snake_case ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,) return retriever def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Union[str, Any] = self.get_dummy_dataset() UpperCAmelCase_ : Union[str, Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="custom" ,) if from_disk: UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,"dataset" ) UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,"index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname ,"index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname ,"dataset" ) ) del dataset UpperCAmelCase_ : List[Any] = RagRetriever( _snake_case ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,) else: UpperCAmelCase_ : int = RagRetriever( _snake_case ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,index=CustomHFIndex(config.retrieval_vector_size ,_snake_case ) ,) return retriever def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" ,string_factory="Flat" ,metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,"hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" ,index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] ,open(index_file_name + ".index_meta.dpr" ,"wb" ) ) UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname ,"psgs_w100.tsv.pkl" ) UpperCAmelCase_ : Optional[Any] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(_snake_case ,open(_snake_case ,"wb" ) ) UpperCAmelCase_ : List[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="legacy" ,index_path=self.tmpdirname ,) UpperCAmelCase_ : Optional[Any] = RagRetriever( _snake_case ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Dict = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase_ : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = retriever.retrieve(_snake_case ,n_docs=_snake_case ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_snake_case ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) ,_snake_case ) self.assertEqual(doc_dicts[0]["id"][0] ,"1" ) # max inner product is reached with second doc 
self.assertEqual(doc_dicts[1]["id"][0] ,"0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: UpperCAmelCase_ : Union[str, Any] = self.get_dummy_dataset() retriever.save_pretrained(_snake_case ) UpperCAmelCase_ : Optional[Any] = RagRetriever.from_pretrained(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ : Dict = retriever.retrieve(_snake_case ,n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) UpperCAmelCase_ : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = retriever.retrieve(_snake_case ,n_docs=_snake_case ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_snake_case ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) ,_snake_case ) self.assertEqual(doc_dicts[0]["id"][0] ,"1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] ,"0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_snake_case ) UpperCAmelCase_ : int = RagRetriever.from_pretrained(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ : List[Any] = retriever.retrieve(_snake_case ,n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = 1 UpperCAmelCase_ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) UpperCAmelCase_ : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = retriever.retrieve(_snake_case ,n_docs=_snake_case ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_snake_case ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) ,_snake_case ) self.assertEqual(doc_dicts[0]["id"][0] ,"1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] ,"0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_snake_case ) UpperCAmelCase_ : str = RagRetriever.from_pretrained(_snake_case ) self.assertIsInstance(_snake_case 
,_snake_case ) UpperCAmelCase_ : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ : Optional[int] = retriever.retrieve(_snake_case ,n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = 1 UpperCAmelCase_ : List[str] = self.get_dummy_legacy_index_retriever() UpperCAmelCase_ : str = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = retriever.retrieve(_snake_case ,n_docs=_snake_case ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_snake_case ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) ,_snake_case ) self.assertEqual(doc_dicts[0]["text"][0] ,"bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] ,"foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Union[str, Any] = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_snake_case ) UpperCAmelCase_ : Tuple = RagRetriever.from_pretrained(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ : Dict = retriever.retrieve(_snake_case ,n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCamelCase__ ( self ): import torch UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : List[Any] = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase_ : Tuple = [[5, 7], [10, 11]] UpperCAmelCase_ : str = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ : Optional[int] = retriever(_snake_case ,_snake_case ,prefix=retriever.config.generator.prefix ,n_docs=_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertIsInstance(_snake_case ,np.ndarray ) UpperCAmelCase_ : Optional[Any] = retriever( _snake_case ,_snake_case ,prefix=retriever.config.generator.prefix ,n_docs=_snake_case ,return_tensors="pt" ,) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(_snake_case ,torch.Tensor ) self.assertIsInstance(_snake_case ,torch.Tensor ) self.assertIsInstance(_snake_case ,torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = self.get_dpr_ctx_encoder_tokenizer() UpperCAmelCase_ : int = 1 UpperCAmelCase_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) retriever.set_ctx_encoder_tokenizer(_snake_case ) UpperCAmelCase_ : 
Optional[int] = [[5, 7], [10, 11]] UpperCAmelCase_ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) UpperCAmelCase_ : Optional[int] = retriever(_snake_case ,_snake_case ,prefix=retriever.config.generator.prefix ,n_docs=_snake_case ) self.assertEqual( len(_snake_case ) ,6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) ,_snake_case ) # check for doc token related keys in dictionary.
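# --- Usage note (added; not part of the original row) ---
# This test class exercises three RagRetriever index flavors (canonical HF index,
# custom index in memory or on disk, and the legacy index) and is gated on optional
# dependencies via @require_faiss / @require_torch / @require_tokenizers /
# @require_sentencepiece. An illustrative invocation, assuming the file is saved
# as tests/test_retrieval_rag.py:
#
#   pip install faiss-cpu datasets
#   python -m pytest tests/test_retrieval_rag.py -q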
67
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig snake_case_ = [ """openmmlab/upernet-convnext-tiny""", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring snake_case_ = """UperNetConfig""" class A_ ( nn.Module ): """simple docstring""" def __init__( self :Tuple , lowercase_ :int , lowercase_ :int , lowercase_ :Union[int, Tuple[int, int]] , lowercase_ :Union[int, Tuple[int, int], str] = 0 , lowercase_ :bool = False , lowercase_ :Union[int, Tuple[int, int]] = 1 , ) -> None: super().__init__() UpperCAmelCase = nn.Convad( in_channels=lowercase_ , out_channels=lowercase_ , kernel_size=lowercase_ , padding=lowercase_ , bias=lowercase_ , dilation=lowercase_ , ) UpperCAmelCase = nn.BatchNormad(lowercase_ ) UpperCAmelCase = nn.ReLU() def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :torch.Tensor ) -> torch.Tensor: UpperCAmelCase = self.conv(lowercase_ ) UpperCAmelCase = self.batch_norm(lowercase_ ) UpperCAmelCase = self.activation(lowercase_ ) return output class A_ ( nn.Module ): """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :int , lowercase_ :int , lowercase_ :int ) -> None: super().__init__() UpperCAmelCase = [ nn.AdaptiveAvgPoolad(lowercase_ ), UperNetConvModule(lowercase_ , lowercase_ , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(lowercase_ ) , lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :torch.Tensor ) -> torch.Tensor: UpperCAmelCase = input for layer in self.layers: UpperCAmelCase = layer(lowercase_ ) return hidden_state class A_ ( nn.Module ): """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :Tuple[int, ...] , lowercase_ :int , lowercase_ :int , lowercase_ :bool ) -> None: super().__init__() UpperCAmelCase = pool_scales UpperCAmelCase = align_corners UpperCAmelCase = in_channels UpperCAmelCase = channels UpperCAmelCase = [] for i, pool_scale in enumerate(lowercase_ ): UpperCAmelCase = UperNetPyramidPoolingBlock(pool_scale=lowercase_ , in_channels=lowercase_ , channels=lowercase_ ) self.blocks.append(lowercase_ ) self.add_module(str(lowercase_ ) , lowercase_ ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :torch.Tensor ) -> List[torch.Tensor]: UpperCAmelCase = [] for ppm in self.blocks: UpperCAmelCase = ppm(lowercase_ ) UpperCAmelCase = nn.functional.interpolate( lowercase_ , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners ) ppm_outs.append(lowercase_ ) return ppm_outs class A_ ( nn.Module ): """simple docstring""" def __init__( self :Dict , lowercase_ :Optional[Any] , lowercase_ :Optional[int] ) -> Any: super().__init__() UpperCAmelCase = config UpperCAmelCase = config.pool_scales # e.g. 
(1, 2, 3, 6) UpperCAmelCase = in_channels UpperCAmelCase = config.hidden_size UpperCAmelCase = False UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module UpperCAmelCase = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) UpperCAmelCase = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module UpperCAmelCase = nn.ModuleList() UpperCAmelCase = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer UpperCAmelCase = UperNetConvModule(lowercase_ , self.channels , kernel_size=1 ) UpperCAmelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(lowercase_ ) self.fpn_convs.append(lowercase_ ) UpperCAmelCase = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def UpperCAmelCase__ ( self :Optional[Any] ) -> List[Any]: self.apply(self._init_weights ) def UpperCAmelCase__ ( self :str , lowercase_ :Union[str, Any] ) -> str: if isinstance(lowercase_ , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCAmelCase__ ( self :Dict , lowercase_ :int ) -> int: UpperCAmelCase = inputs[-1] UpperCAmelCase = [x] psp_outs.extend(self.psp_modules(lowercase_ ) ) UpperCAmelCase = torch.cat(lowercase_ , dim=1 ) UpperCAmelCase = self.bottleneck(lowercase_ ) return output def UpperCAmelCase__ ( self :str , lowercase_ :torch.Tensor ) -> torch.Tensor: # build laterals UpperCAmelCase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(lowercase_ ) ) # build top-down path UpperCAmelCase = len(lowercase_ ) for i in range(used_backbone_levels - 1 , 0 , -1 ): UpperCAmelCase = laterals[i - 1].shape[2:] UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=lowercase_ , mode='bilinear' , align_corners=self.align_corners ) # build outputs UpperCAmelCase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): UpperCAmelCase = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners ) UpperCAmelCase = torch.cat(lowercase_ , dim=1 ) UpperCAmelCase = self.fpn_bottleneck(lowercase_ ) UpperCAmelCase = self.classifier(lowercase_ ) return output class A_ ( nn.Module ): """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :Optional[Any] , lowercase_ :int = 2 , lowercase_ :int = 3 , lowercase_ :Union[int, Tuple[int, int]] = 1 ) -> None: super().__init__() UpperCAmelCase = config UpperCAmelCase = config.auxiliary_in_channels UpperCAmelCase = config.auxiliary_channels UpperCAmelCase = config.auxiliary_num_convs UpperCAmelCase = config.auxiliary_concat_input UpperCAmelCase = in_index UpperCAmelCase = (kernel_size // 2) * dilation UpperCAmelCase = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_ ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_ ) ) if self.num_convs == 0: UpperCAmelCase = nn.Identity() else: 
UpperCAmelCase = nn.Sequential(*lowercase_ ) if self.concat_input: UpperCAmelCase = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=lowercase_ , padding=kernel_size // 2 ) UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def UpperCAmelCase__ ( self :List[str] ) -> Dict: self.apply(self._init_weights ) def UpperCAmelCase__ ( self :int , lowercase_ :Any ) -> List[Any]: if isinstance(lowercase_ , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCAmelCase__ ( self :Dict , lowercase_ :torch.Tensor ) -> torch.Tensor: # just take the relevant feature maps UpperCAmelCase = encoder_hidden_states[self.in_index] UpperCAmelCase = self.convs(lowercase_ ) if self.concat_input: UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) UpperCAmelCase = self.classifier(lowercase_ ) return output class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = UperNetConfig __UpperCamelCase = """pixel_values""" __UpperCamelCase = True def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[str] ) -> Union[str, Any]: if isinstance(lowercase_ , lowercase_ ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def UpperCAmelCase__ ( self :int ) -> Tuple: self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def UpperCAmelCase__ ( self :Any , lowercase_ :Dict , lowercase_ :Union[str, Any]=False ) -> Optional[int]: if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = value snake_case_ = R""" Parameters: This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ snake_case_ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( """UperNet framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.""" , SCREAMING_SNAKE_CASE_ , ) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :int , lowercase_ :Tuple ) -> int: super().__init__(lowercase_ ) UpperCAmelCase = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) UpperCAmelCase = UperNetHead(lowercase_ , in_channels=self.backbone.channels ) UpperCAmelCase = UperNetFCNHead(lowercase_ ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) ) @replace_return_docstrings(output_type=lowercase_ , config_class=_CONFIG_FOR_DOC ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]: UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions UpperCAmelCase = self.backbone.forward_with_filtered_kwargs( lowercase_ , output_hidden_states=lowercase_ , output_attentions=lowercase_ ) UpperCAmelCase = outputs.feature_maps UpperCAmelCase = self.decode_head(lowercase_ ) UpperCAmelCase = nn.functional.interpolate(lowercase_ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=lowercase_ ) UpperCAmelCase = None if self.auxiliary_head is not None: UpperCAmelCase = self.auxiliary_head(lowercase_ ) UpperCAmelCase = nn.functional.interpolate( lowercase_ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=lowercase_ ) UpperCAmelCase = None if labels is not None: if self.config.num_labels == 1: raise ValueError('The number of labels should be greater than one' ) else: # compute weighted loss UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) UpperCAmelCase = loss_fct(lowercase_ , lowercase_ ) UpperCAmelCase = loss_fct(lowercase_ , lowercase_ ) UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: UpperCAmelCase = (logits,) + outputs[1:] else: UpperCAmelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
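# --- Usage sketch (added; not part of the original row) ---
# With the original (de-obfuscated) transformers names, running semantic
# segmentation through this UperNet model would look roughly like the following;
# the checkpoint id comes from the list at the top of the file:
#
#   from PIL import Image
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=Image.open("scene.png").convert("RGB"), return_tensors="pt")
#   logits = model(**inputs).logits  # (batch, num_labels, height, width), upsampled to the input size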
78
"""simple docstring""" import os import jsonlines import numpy as np from tqdm import tqdm SCREAMING_SNAKE_CASE__ = 2_048 SCREAMING_SNAKE_CASE__ = 4_096 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = os.environ.pop("PROCESS_TRAIN", "false") SCREAMING_SNAKE_CASE__ = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4} def lowerCAmelCase__ ( _UpperCamelCase : str ) -> Any: """simple docstring""" def choose_first(_UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=False ): assert isinstance(_UpperCamelCase , _UpperCamelCase ) if len(_UpperCamelCase ) == 1: snake_case = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: snake_case = {k: [a[k]] for k in a} if len(a['start_token'] ) > 0: break return a snake_case = {'id': example['id']} snake_case = example['annotations'] snake_case = annotation['yes_no_answer'] if 0 in yes_no_answer or 1 in yes_no_answer: snake_case = ['yes'] if 1 in yes_no_answer else ['no'] snake_case = snake_case = [] snake_case = snake_case = [] snake_case = ['<cls>'] else: snake_case = ['short'] snake_case = choose_first(annotation['short_answers'] ) if len(out['start_token'] ) == 0: # answer will be long if short is not available snake_case = ['long'] snake_case = choose_first(annotation['long_answer'] , is_long_answer=_UpperCamelCase ) snake_case = [] answer.update(_UpperCamelCase ) # disregard some samples if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]: snake_case = True else: snake_case = False snake_case = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text'] if not all(isinstance(answer[k] , _UpperCamelCase ) for k in cols ): raise ValueError('Issue in ID' , example['id'] ) return answer def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : List[str]=False ) -> Union[str, Any]: """simple docstring""" snake_case = _get_single_answer(_UpperCamelCase ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element snake_case = example['document']['tokens'] snake_case = [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) return { "context": " ".join(_UpperCamelCase ), "answer": { "start_token": -1_0_0, # ignore index in cross-entropy "end_token": -1_0_0, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples snake_case = ['start_token', 'end_token'] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 snake_case = example['document']['tokens'] snake_case = answer['start_token'] snake_case = answer['end_token'] snake_case = [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 snake_case = ' '.join(context[start_token:end_token] ) # checking above code if assertion: snake_case = doc['is_html'][answer['start_token'] : answer['end_token']] snake_case = doc['token'][answer['start_token'] : answer['end_token']] snake_case = ' '.join([old[i] for i in range(len(_UpperCamelCase ) ) if not is_html[i]] ) if new != old: print('ID:' , example['id'] ) print('New:' , _UpperCamelCase , end='\n' ) print('Old:' , _UpperCamelCase , end='\n\n' ) return { "context": " ".join(_UpperCamelCase ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=2_0_4_8 , _UpperCamelCase : Union[str, Any]=4_0_9_6 , _UpperCamelCase : Dict=True ) -> Optional[Any]: """simple docstring""" snake_case = get_context_and_ans(_UpperCamelCase , assertion=_UpperCamelCase ) snake_case = out['answer'] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } snake_case = tokenizer(example['question']['text'] , out['context'] ).input_ids snake_case = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element snake_case = [] snake_case = [] snake_case = input_ids[:q_len] snake_case = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride ) for i in doc_start_indices: snake_case = i + max_length - q_len snake_case = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['category'][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_0_0] * len(_UpperCamelCase ), "end_token": [-1_0_0] * len(_UpperCamelCase ), "category": category, }, } snake_case = out['context'].split() snake_case = splitted_context[answer['end_token']] snake_case = len( tokenizer( ' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=_UpperCamelCase , ).input_ids ) snake_case = len( tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=_UpperCamelCase ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token snake_case = len(tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 snake_case = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive snake_case = answer['start_token'] snake_case = answer['end_token'] if assertion: snake_case = tokenizer.decode(_UpperCamelCase ) if answer["span"] != new: print('ISSUE IN TOKENIZATION' ) print('OLD:' , answer['span'] ) print('NEW:' , _UpperCamelCase , end='\n\n' ) if len(_UpperCamelCase ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } snake_case = 
input_ids[:q_len] snake_case = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride ) snake_case = [] snake_case = [] snake_case = [] snake_case = [] # null, yes, no, long, short for i in doc_start_indices: snake_case = i + max_length - q_len snake_case = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: snake_case = start_token - i + q_len snake_case = end_token - i + q_len answers_category.append(answer['category'][0] ) # ["short"] -> "short" else: snake_case = -1_0_0 snake_case = -1_0_0 answers_category.append('null' ) snake_case = inputs[-1][start_token : end_token + 1] answers_start_token.append(_UpperCamelCase ) answers_end_token.append(_UpperCamelCase ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('ISSUE in strided for ID:' , example['id'] ) print('New:' , tokenizer.decode(_UpperCamelCase ) ) print('Old:' , tokenizer.decode(_UpperCamelCase ) , end='\n\n' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=2_0_4_8 , _UpperCamelCase : Union[str, Any]=4_0_9_6 , _UpperCamelCase : List[str]=False ) -> Union[str, Any]: """simple docstring""" snake_case = get_strided_contexts_and_ans( _UpperCamelCase , _UpperCamelCase , doc_stride=_UpperCamelCase , max_length=_UpperCamelCase , assertion=_UpperCamelCase , ) return example def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : List[str] ) -> Any: """simple docstring""" with jsonlines.open(_UpperCamelCase , 'a' ) as writer: for example in tqdm(_UpperCamelCase , total=len(_UpperCamelCase ) , desc='Saving samples ... ' ): snake_case = example['labels'] for ids, start, end, cat in zip( example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { 'input_ids': ids, 'start_token': start, 'end_token': end, 'category': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer SCREAMING_SNAKE_CASE__ = load_dataset("natural_questions") SCREAMING_SNAKE_CASE__ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") SCREAMING_SNAKE_CASE__ = data["train" if PROCESS_TRAIN == "true" else "validation"] SCREAMING_SNAKE_CASE__ = { "tokenizer": tokenizer, "doc_stride": DOC_STRIDE, "max_length": MAX_LENGTH, "assertion": False, } SCREAMING_SNAKE_CASE__ = data.map(prepare_inputs, fn_kwargs=fn_kwargs) SCREAMING_SNAKE_CASE__ = data.remove_columns(["annotations", "document", "id", "question"]) print(data) np.random.seed(SEED) SCREAMING_SNAKE_CASE__ = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl" save_to_disk(data, file_name=cache_file_name)
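# --- Usage note (added; not part of the original row) ---
# The __main__ block above drives the whole pipeline: it loads natural_questions,
# strides each (question, context) pair into MAX_LENGTH windows with DOC_STRIDE
# overlap, and writes one JSON line per window. Assuming the file is saved as
# prepare_nq.py (illustrative name):
#
#   PROCESS_TRAIN=true python prepare_nq.py   # -> nq-training.jsonl
#   python prepare_nq.py                      # -> nq-validation.jsonl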
150
0
import qiskit


def lowerCamelCase__ ( A__ : int , A__ : int ):
    '''simple docstring'''
    __lowerCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )

    __lowerCamelCase = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bita == 1:
        qc_ha.x(1 )
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value

    # Execute the circuit on the qasm simulator
    __lowerCamelCase = qiskit.execute(A__ , A__ , shots=1000 )

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(A__ )


if __name__ == "__main__":
    UpperCAmelCase_ = half_adder(1, 1)
    print(f"""Half Adder Output Qubit Counts: {counts}""")
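# --- Usage note (added; not part of the original row) ---
# measure(2, 0) writes the XOR (sum) bit and measure(3, 1) the AND (carry) bit,
# so the counts keys read "carry sum" in Qiskit's classical-bit ordering. Since
# the circuit is deterministic, all 1000 shots should land on one bitstring:
#
#   half_adder(0, 0) -> {"00": 1000}    half_adder(0, 1) -> {"01": 1000}
#   half_adder(1, 0) -> {"01": 1000}    half_adder(1, 1) -> {"10": 1000}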
29
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class lowerCamelCase__( __lowerCamelCase):
    UpperCAmelCase__ : Union[str, Any] = 'yolos'

    def __init__(
        self: Dict ,
        UpperCamelCase_: List[Any]=7_68 ,
        UpperCamelCase_: Tuple=12 ,
        UpperCamelCase_: int=12 ,
        UpperCamelCase_: int=30_72 ,
        UpperCamelCase_: List[str]="gelu" ,
        UpperCamelCase_: Union[str, Any]=0.0 ,
        UpperCamelCase_: int=0.0 ,
        UpperCamelCase_: Optional[int]=0.02 ,
        UpperCamelCase_: Dict=1E-12 ,
        UpperCamelCase_: List[Any]=[5_12, 8_64] ,
        UpperCamelCase_: Optional[int]=16 ,
        UpperCamelCase_: Any=3 ,
        UpperCamelCase_: Union[str, Any]=True ,
        UpperCamelCase_: List[str]=1_00 ,
        UpperCamelCase_: List[str]=True ,
        UpperCamelCase_: Any=False ,
        UpperCamelCase_: Optional[Any]=1 ,
        UpperCamelCase_: Any=5 ,
        UpperCamelCase_: Any=2 ,
        UpperCamelCase_: Tuple=5 ,
        UpperCamelCase_: str=2 ,
        UpperCamelCase_: Any=0.1 ,
        **UpperCamelCase_: Any ,
    ):
        super().__init__(**UpperCamelCase_ )

        __lowerCamelCase = hidden_size
        __lowerCamelCase = num_hidden_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = intermediate_size
        __lowerCamelCase = hidden_act
        __lowerCamelCase = hidden_dropout_prob
        __lowerCamelCase = attention_probs_dropout_prob
        __lowerCamelCase = initializer_range
        __lowerCamelCase = layer_norm_eps
        __lowerCamelCase = image_size
        __lowerCamelCase = patch_size
        __lowerCamelCase = num_channels
        __lowerCamelCase = qkv_bias
        __lowerCamelCase = num_detection_tokens
        __lowerCamelCase = use_mid_position_embeddings
        __lowerCamelCase = auxiliary_loss
        # Hungarian matcher
        __lowerCamelCase = class_cost
        __lowerCamelCase = bbox_cost
        __lowerCamelCase = giou_cost
        # Loss coefficients
        __lowerCamelCase = bbox_loss_coefficient
        __lowerCamelCase = giou_loss_coefficient
        __lowerCamelCase = eos_coefficient


class lowerCamelCase__( __lowerCamelCase):
    UpperCAmelCase__ : Tuple = version.parse('1.11')

    @property
    def lowerCAmelCase__ ( self: Any ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def lowerCAmelCase__ ( self: Dict ):
        return 1E-4

    @property
    def lowerCAmelCase__ ( self: Dict ):
        return 12
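# --- Usage sketch (added; not part of the original row) ---
# With the original class name YolosConfig, instantiation with the defaults
# shown above (hidden_size=768, 12 layers, 12 heads, 100 detection tokens)
# would look like:
#
#   config = YolosConfig()
#   config.num_detection_tokens  # -> 100, appended to the patch sequence
#   # The ONNX config above declares one dynamic-axis input and a 1e-4
#   # validation tolerance:
#   #   {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}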
29
1
from __future__ import annotations


def UpperCAmelCase ( lowercase ):  # This function is recursive
    """simple docstring"""
    __lowercase = len(__SCREAMING_SNAKE_CASE )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    __lowercase = array[0]
    __lowercase = False
    __lowercase = 1
    __lowercase = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            __lowercase = True
            __lowercase = [element for element in array[i:] if element >= array[i]]
            __lowercase = longest_subsequence(__SCREAMING_SNAKE_CASE )
            if len(__SCREAMING_SNAKE_CASE ) > len(__SCREAMING_SNAKE_CASE ):
                __lowercase = temp_array
        else:
            i += 1

    __lowercase = [element for element in array[1:] if element >= pivot]
    __lowercase = [pivot, *longest_subsequence(__SCREAMING_SNAKE_CASE )]
    if len(__SCREAMING_SNAKE_CASE ) > len(__SCREAMING_SNAKE_CASE ):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
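# --- Usage note (added; not part of the original row) ---
# The recursion above returns a longest non-decreasing subsequence (ties broken
# by which branch is explored first). For example:
#
#   longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
#   # -> [10, 22, 33, 41, 60, 80]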
210
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): print('Loading config file...' ) def flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any="" , __SCREAMING_SNAKE_CASE : List[Any]="." ): lowercase_ : List[str] = [] for k, v in d.items(): lowercase_ : Dict = parent_key + sep + k if parent_key else k if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sep=__SCREAMING_SNAKE_CASE ).items() ) else: items.append((new_key, v) ) return dict(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = argparse.Namespace() with open(__SCREAMING_SNAKE_CASE , 'r' ) as yaml_file: try: lowercase_ : str = yaml.load(__SCREAMING_SNAKE_CASE , Loader=yaml.FullLoader ) lowercase_ : List[Any] = flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE ) for k, v in flat_cfg.items(): setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(__SCREAMING_SNAKE_CASE , str(__SCREAMING_SNAKE_CASE ) ) ) return config def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : int = MobileViTVaConfig() lowercase_ : List[str] = False # dataset if task_name.startswith('imagenet1k_' ): lowercase_ : List[Any] = 10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowercase_ : str = 3_84 else: lowercase_ : Dict = 2_56 lowercase_ : int = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): lowercase_ : int = 2_10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowercase_ : Optional[Any] = 3_84 else: lowercase_ : Tuple = 2_56 lowercase_ : List[str] = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): lowercase_ : int = 1_51 lowercase_ : Optional[Any] = 5_12 lowercase_ : str = 'ade20k-id2label.json' lowercase_ : List[Any] = True elif task_name.startswith('voc_' ): lowercase_ : Union[str, Any] = 21 lowercase_ : Tuple = 5_12 lowercase_ : List[str] = 'pascal-voc-id2label.json' lowercase_ : str = True # orig_config lowercase_ : Optional[int] = load_orig_config_file(__SCREAMING_SNAKE_CASE ) assert getattr(__SCREAMING_SNAKE_CASE , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" lowercase_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.output_stride' , 16 ) if "_deeplabv3" in task_name: lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] ) lowercase_ : 
Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 ) lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label lowercase_ : Optional[Any] = 'huggingface/label-files' lowercase_ : List[Any] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : List[str] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : int = idalabel lowercase_ : List[Any] = {v: k for k, v in idalabel.items()} return config def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = val def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=False ): if base_model: lowercase_ : int = '' else: lowercase_ : str = 'mobilevitv2.' lowercase_ : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowercase_ : Dict = k[8:] else: lowercase_ : Union[str, Any] = k if ".block." in k: lowercase_ : List[str] = k_new.replace('.block.' , '.' ) if ".conv." in k: lowercase_ : List[Any] = k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: lowercase_ : str = k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: lowercase_ : Dict = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if F'''layer_{i}.''' in k: lowercase_ : Tuple = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowercase_ : Any = k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: lowercase_ : str = k_new.replace('.red_1x1.' , '.reduce_1x1.' ) for i in [3, 4, 5]: if F'''layer_{i}.0.''' in k: lowercase_ : Tuple = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if F'''layer_{i}.1.local_rep.0.''' in k: lowercase_ : Any = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if F'''layer_{i}.1.local_rep.1.''' in k: lowercase_ : List[Any] = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowercase_ : Dict = [0, 1] elif i == 4: lowercase_ : int = [0, 1, 2, 3] elif i == 5: lowercase_ : List[str] = [0, 1, 2] for j in j_in: if F'''layer_{i}.1.global_rep.{j}.''' in k: lowercase_ : List[str] = k_new.replace( F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if F'''layer_{i}.1.global_rep.{j+1}.''' in k: lowercase_ : int = k_new.replace( F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if F'''layer_{i}.1.conv_proj.''' in k: lowercase_ : str = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowercase_ : Optional[Any] = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: lowercase_ : Any = k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: lowercase_ : List[str] = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." in k: lowercase_ : int = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' ) if "pre_norm_ffn.3." in k: lowercase_ : str = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." 
in k: lowercase_ : Union[str, Any] = k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: lowercase_ : Optional[int] = k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: lowercase_ : Dict = k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in k: lowercase_ : Dict = k_new.replace('.aspp_pool.' , '.' ) rename_keys.append((k, k_new) ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Any ): lowercase_ : str = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(__SCREAMING_SNAKE_CASE ) for k in keys_to_ignore: state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( ): lowercase_ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Tuple = get_mobilevitva_config(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # load original state_dict lowercase_ : Tuple = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): lowercase_ : Tuple = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE ).eval() lowercase_ : Optional[int] = False else: lowercase_ : Any = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE ).eval() lowercase_ : int = False # remove and rename some keys of load the original model lowercase_ : Any = checkpoint remove_unused_keys(__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , base_model=__SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # load modified state_dict model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor lowercase_ : Union[str, Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowercase_ : Any = image_processor(images=prepare_img() , return_tensors='pt' ) lowercase_ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ) # verify classification model if task_name.startswith('imagenet' ): lowercase_ : List[str] = outputs.logits lowercase_ : int = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant lowercase_ : Optional[int] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to 
convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
213
0
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an audio byte payload through ffmpeg into a float32 waveform."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Stream raw microphone bytes through ffmpeg in fixed-size chunks."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping numpy chunks suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into pieces of `chunk_len` bytes with left/right stride overlap."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal generator yielding `buflen`-byte reads from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
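# Editor's note (a sketch, not part of the original file): chunk_bytes_iter
# behaviour on a toy byte stream. With 4-byte chunks and a (1, 1) stride, each
# chunk is advanced by chunk_len - stride_left - stride_right = 2 bytes, so
# neighbouring chunks overlap by one byte on each side.
if __name__ == "__main__":
    chunks = list(chunk_bytes_iter(iter([b"abcdefgh"]), 4, stride=(1, 1)))
    assert chunks == [
        {"raw": b"abcd", "stride": (0, 1)},
        {"raw": b"cdef", "stride": (1, 1)},
        {"raw": b"efgh", "stride": (1, 1)},
        {"raw": b"gh", "stride": (1, 0)},
    ]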
207
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
207
1
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
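# Editor's note (a sketch, not part of the original file): get_device_map
# splits the layer indices into contiguous, roughly equal blocks, one per
# device, and assert_device_map accepts exactly such a partition.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=4, devices=[0, 1])
    assert device_map == {0: [0, 1], 1: [2, 3]}
    assert_device_map(device_map, num_blocks=4)  # a valid partition raises nothing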
65
def solution(length: int = 50) -> int:
    """Count the tilings of a row of `length` unit cells using unit gaps and
    tiles of length 2, 3 and 4 (Project Euler problem 117)."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
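# Editor's note (a sketch, not part of the original file): the triple loop is
# equivalent to the recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with
# f(0) = 1, so f(5) = 8 + 4 + 2 + 1 = 15 and f(6) = 15 + 8 + 4 + 2 = 29.
if __name__ == "__main__":
    assert solution(5) == 15
    assert solution(6) == 29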
82
0
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class __a ( lowerCAmelCase__ ): def snake_case_ ( self , a__ ): with open(a__ , encoding='utf-8' ) as input_file: _lowerCamelCase = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' ) _lowerCamelCase = input_file.read() _lowerCamelCase = regexp.search(a__ ) return match def snake_case_ ( self , a__ ): with open(a__ , encoding='utf-8' ) as input_file: _lowerCamelCase = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL ) _lowerCamelCase = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` _lowerCamelCase = regexp.finditer(a__ ) _lowerCamelCase = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def snake_case_ ( self ): _lowerCamelCase = Path('./datasets' ) _lowerCamelCase = list(dataset_paths.absolute().glob('**/*.py' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(a__ ) ): raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}' ) def snake_case_ ( self ): _lowerCamelCase = Path('./datasets' ) _lowerCamelCase = list(dataset_paths.absolute().glob('**/*.py' ) ) for dataset in dataset_files: if self._no_print_statements(str(a__ ) ): raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.' )
80
"""simple docstring""" import numpy as np def SCREAMING_SNAKE_CASE_ ( snake_case : np.ndarray )-> np.ndarray: return 1 / (1 + np.exp(-vector )) def SCREAMING_SNAKE_CASE_ ( snake_case : np.ndarray )-> np.ndarray: return vector * sigmoid(snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
80
1
"""simple docstring""" import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = tempfile.mkdtemp() A = 8 # DPR tok A = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] A = os.path.join(self.tmpdirname ,'dpr_tokenizer' ) os.makedirs(A_ ,exist_ok=A_ ) A = os.path.join(A_ ,DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A = {'unk_token': '<unk>'} A = os.path.join(self.tmpdirname ,'bart_tokenizer' ) os.makedirs(A_ ,exist_ok=A_ ) A = os.path.join(A_ ,BART_VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(A_ ,BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'dpr_tokenizer' ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'bart_tokenizer' ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) @require_tokenizers def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = os.path.join(self.tmpdirname ,'rag_tokenizer' ) A = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ) A = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(A_ ) rag_tokenizer.save_pretrained(A_ ) A = RagTokenizer.from_pretrained(A_ ,config=A_ ) self.assertIsInstance(new_rag_tokenizer.question_encoder ,A_ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator ,A_ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) A = [ 'who 
got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] A = tokenizer(A_ ) self.assertIsNotNone(A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) A = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] A = tokenizer(A_ ) self.assertIsNotNone(A_ )
74
"""simple docstring""" import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = DDIMPipeline _a = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _a = PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'latents', 'callback', 'callback_steps', } _a = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS _a = False def snake_case ( self : str )-> Optional[Any]: torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] =UNetaDModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), ) lowerCamelCase__ : Optional[Any] =DDIMScheduler() lowerCamelCase__ : List[Any] ={'''unet''': unet, '''scheduler''': scheduler} return components def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : Optional[Any]=0 )-> Optional[int]: if str(lowerCamelCase ).startswith('''mps''' ): lowerCamelCase__ : Dict =torch.manual_seed(lowerCamelCase ) else: lowerCamelCase__ : Optional[int] =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) lowerCamelCase__ : Tuple ={ '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def snake_case ( self : Dict )-> str: lowerCamelCase__ : Optional[Any] ='''cpu''' lowerCamelCase__ : int =self.get_dummy_components() lowerCamelCase__ : Optional[int] =self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : List[str] =self.get_dummy_inputs(lowerCamelCase ) lowerCamelCase__ : Any =pipe(**lowerCamelCase ).images lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 32, 32, 3) ) lowerCamelCase__ : Tuple =np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) lowerCamelCase__ : str =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase, 1E-3 ) def snake_case ( self : Union[str, Any] )-> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def snake_case ( self : Union[str, Any] )-> int: super().test_save_load_local(expected_max_difference=3E-3 ) def snake_case ( self : List[Any] )-> List[Any]: super().test_save_load_optional_components(expected_max_difference=3E-3 ) def snake_case ( self : Optional[Any] )-> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[Any] )-> List[str]: lowerCamelCase__ : Optional[Any] ='''google/ddpm-cifar10-32''' lowerCamelCase__ : Union[str, Any] =UNetaDModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =DDIMScheduler() lowerCamelCase__ : int =DDIMPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase ) ddim.to(lowerCamelCase ) ddim.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : Tuple =torch.manual_seed(0 ) 
lowerCamelCase__ : int =ddim(generator=lowerCamelCase, eta=0.0, output_type='''numpy''' ).images lowerCamelCase__ : Tuple =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ : Any =np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self : Optional[int] )-> Any: lowerCamelCase__ : str ='''google/ddpm-ema-bedroom-256''' lowerCamelCase__ : Optional[int] =UNetaDModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Any =DDIMScheduler.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =DDIMPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase ) ddpm.to(lowerCamelCase ) ddpm.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : List[str] =torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] =ddpm(generator=lowerCamelCase, output_type='''numpy''' ).images lowerCamelCase__ : Any =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCamelCase__ : Any =np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
238
0
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase : Optional[Any] =logging.get_logger(__name__) # General docstring lowerCamelCase : Optional[Any] ='''RegNetConfig''' # Base docstring lowerCamelCase : List[str] ='''facebook/regnet-y-040''' lowerCamelCase : Any =[1, 1088, 7, 7] # Image classification docstring lowerCamelCase : Any ='''facebook/regnet-y-040''' lowerCamelCase : Dict ='''tabby, tabby cat''' lowerCamelCase : List[Any] =[ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class __a ( nn.Module ): def __init__( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : Optional[str] = "relu" , ): '''simple docstring''' super().__init__() UpperCamelCase__ : str = nn.Convad( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , padding=kernel_size // 2 , groups=SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE , ) UpperCamelCase__ : Optional[Any] = nn.BatchNormad(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity() def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = self.convolution(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Union[str, Any] = self.normalization(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[int] = self.activation(SCREAMING_SNAKE_CASE ) return hidden_state class __a ( nn.Module ): def __init__( self : Dict , SCREAMING_SNAKE_CASE : RegNetConfig ): '''simple docstring''' super().__init__() UpperCamelCase__ : Union[str, Any] = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) UpperCamelCase__ : Union[str, Any] = config.num_channels def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' UpperCamelCase__ : Any = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) UpperCamelCase__ : Dict = self.embedder(SCREAMING_SNAKE_CASE ) return hidden_state class __a ( nn.Module ): def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int = 2 ): '''simple docstring''' super().__init__() UpperCamelCase__ : List[Any] = nn.Convad(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 , stride=SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : str = nn.BatchNormad(SCREAMING_SNAKE_CASE ) def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Tensor ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = self.convolution(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Union[str, Any] = self.normalization(SCREAMING_SNAKE_CASE ) return hidden_state class __a ( nn.Module ): def __init__( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' super().__init__() UpperCamelCase__ : Any = nn.AdaptiveAvgPoolad((1, 1) ) UpperCamelCase__ : Dict = nn.Sequential( nn.Convad(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 ) , nn.ReLU() , nn.Convad(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 ) , nn.Sigmoid() , ) def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' UpperCamelCase__ : Union[str, Any] = self.pooler(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Any = self.attention(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : str = hidden_state * attention return hidden_state class __a ( nn.Module ): def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : RegNetConfig , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int = 1 ): '''simple docstring''' super().__init__() UpperCamelCase__ : Tuple = in_channels != out_channels or stride != 1 UpperCamelCase__ : str = max(1 , out_channels // config.groups_width ) UpperCamelCase__ : Union[str, Any] = ( RegNetShortCut(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity() ) UpperCamelCase__ : str = nn.Sequential( RegNetConvLayer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , groups=SCREAMING_SNAKE_CASE , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 , activation=SCREAMING_SNAKE_CASE ) , ) UpperCamelCase__ : List[str] = ACTaFN[config.hidden_act] def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' UpperCamelCase__ : Optional[int] = hidden_state UpperCamelCase__ : List[str] = self.layer(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = self.shortcut(SCREAMING_SNAKE_CASE ) hidden_state += residual UpperCamelCase__ : Dict = self.activation(SCREAMING_SNAKE_CASE ) return hidden_state class __a ( nn.Module ): def __init__( self : int , SCREAMING_SNAKE_CASE : RegNetConfig , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int = 1 ): '''simple docstring''' super().__init__() UpperCamelCase__ : Tuple = in_channels != out_channels or stride != 1 UpperCamelCase__ : List[Any] = max(1 , out_channels // config.groups_width ) UpperCamelCase__ : Optional[int] = ( RegNetShortCut(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity() ) UpperCamelCase__ : Optional[int] = nn.Sequential( RegNetConvLayer(SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , groups=SCREAMING_SNAKE_CASE , activation=config.hidden_act ) , RegNetSELayer(SCREAMING_SNAKE_CASE , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 , activation=SCREAMING_SNAKE_CASE ) , ) UpperCamelCase__ : Optional[Any] = ACTaFN[config.hidden_act] def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' UpperCamelCase__ : Tuple = hidden_state UpperCamelCase__ : List[str] = self.layer(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[int] = self.shortcut(SCREAMING_SNAKE_CASE ) hidden_state += residual UpperCamelCase__ : Tuple = self.activation(SCREAMING_SNAKE_CASE ) return hidden_state class __a ( nn.Module ): def __init__( self : int , SCREAMING_SNAKE_CASE : RegNetConfig , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , ): '''simple docstring''' super().__init__() UpperCamelCase__ : Any = RegNetXLayer if config.layer_type == "x" else RegNetYLayer UpperCamelCase__ : Optional[Any] = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , ) , *[layer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for _ in range(depth - 1 )] , ) def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' UpperCamelCase__ : int = self.layers(SCREAMING_SNAKE_CASE ) return hidden_state class __a ( nn.Module ): def __init__( self : List[str] , SCREAMING_SNAKE_CASE : RegNetConfig ): '''simple docstring''' super().__init__() UpperCamelCase__ : Optional[Any] = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( SCREAMING_SNAKE_CASE , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) UpperCamelCase__ : str = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE , config.depths[1:] ): self.stages.append(RegNetStage(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , depth=SCREAMING_SNAKE_CASE ) ) def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Tensor , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = True ): '''simple docstring''' UpperCamelCase__ : Tuple = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCamelCase__ : Union[str, Any] = hidden_states + (hidden_state,) UpperCamelCase__ : Optional[Any] = stage_module(SCREAMING_SNAKE_CASE ) if output_hidden_states: UpperCamelCase__ : int = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE , hidden_states=SCREAMING_SNAKE_CASE ) class __a ( A__ ): _lowerCAmelCase : Optional[Any] = RegNetConfig _lowerCAmelCase : List[str] = '''regnet''' _lowerCAmelCase : int = '''pixel_values''' _lowerCAmelCase : List[str] = True def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE , nn.Convad 
): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]=False ): '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase__ : List[Any] = value lowerCamelCase : Optional[int] =R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCamelCase : List[Any] =R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , A__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __a ( A__ ): def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = config UpperCamelCase__ : str = RegNetEmbeddings(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[Any] = RegNetEncoder(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __lowercase ( self : str , SCREAMING_SNAKE_CASE : Tensor , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None ): '''simple docstring''' UpperCamelCase__ : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ : Union[str, Any] = self.embedder(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Any = self.encoder( SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = encoder_outputs[0] UpperCamelCase__ : Optional[Any] = self.pooler(SCREAMING_SNAKE_CASE ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE , pooler_output=SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' 
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , A__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __a ( A__ ): def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[Any] = config.num_labels UpperCamelCase__ : Dict = RegNetModel(SCREAMING_SNAKE_CASE ) # classification head UpperCamelCase__ : Dict = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __lowercase ( self : str , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , ): '''simple docstring''' UpperCamelCase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ : Tuple = self.regnet(SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[int] = outputs.pooler_output if return_dict else outputs[1] UpperCamelCase__ : Any = self.classifier(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCamelCase__ : List[str] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCamelCase__ : Any = "single_label_classification" else: UpperCamelCase__ : List[str] = "multi_label_classification" if self.config.problem_type == "regression": UpperCamelCase__ : Union[str, Any] = MSELoss() if self.num_labels == 1: UpperCamelCase__ : Dict = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCamelCase__ : str = loss_fct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif self.config.problem_type == "single_label_classification": UpperCamelCase__ : List[str] = CrossEntropyLoss() UpperCamelCase__ : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCamelCase__ : Dict = BCEWithLogitsLoss() UpperCamelCase__ : List[str] = loss_fct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if not return_dict: UpperCamelCase__ : Any = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states )
350
def trapezoidal_rule(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # note: with exact arithmetic the strict `<` drops the last interior
    # point (x = b - h); floating-point rounding usually keeps it
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
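# Editor's note (a sketch, not part of the original file): for f(x) = x^2 on
# [0, 1] the composite trapezoid error is about h^2/12 * (b - a) * f'' =
# 0.01/12 * 2 ~= 0.0017 at h = 0.1, so 10 steps should give ~0.335 against
# the exact integral 1/3.
if __name__ == "__main__":
    estimate = trapezoidal_rule([0.0, 1.0], 10.0)
    print(f"estimate = {estimate}")  # ~0.335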
196
0
"""simple docstring""" import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=() , _UpperCamelCase=None , _UpperCamelCase="no" , _UpperCamelCase="29500" ): '''simple docstring''' __lowerCAmelCase = False __lowerCAmelCase = False if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ): __lowerCAmelCase = True elif "IPython" in sys.modules: __lowerCAmelCase = "google.colab" in str(sys.modules["IPython"].get_ipython() ) try: __lowerCAmelCase = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , _UpperCamelCase ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: __lowerCAmelCase = 8 __lowerCAmelCase = PrepareForLaunch(_UpperCamelCase , distributed_type="TPU" ) print(f"Launching a training on {num_processes} TPU cores." ) xmp.spawn(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method="fork" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on one CPU." ) function(*_UpperCamelCase ) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if torch.cuda.is_initialized(): raise ValueError( "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " "function." ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_UpperCamelCase , master_addr="127.0.01" , master_port=_UpperCamelCase , mixed_precision=_UpperCamelCase ): __lowerCAmelCase = PrepareForLaunch(_UpperCamelCase , distributed_type="MULTI_GPU" ) print(f"Launching training on {num_processes} GPUs." ) try: start_processes(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method="fork" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. 
" "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic." ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): __lowerCAmelCase = "1" print("Launching training on MPS." ) elif torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on CPU." ) function(*_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=() , _UpperCamelCase=2 ): '''simple docstring''' from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_UpperCamelCase , master_addr="127.0.01" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ): __lowerCAmelCase = PrepareForLaunch(_UpperCamelCase , debug=_UpperCamelCase ) start_processes(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method="fork" )
57
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) __snake_case : List[str] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) sd_pipe.set_scheduler('''sample_euler''' ) __snake_case : Tuple = '''A painting of a squirrel eating a burger''' __snake_case : str = torch.manual_seed(0 ) __snake_case : int = sd_pipe([prompt] , generator=a_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) __snake_case : List[str] = output.images __snake_case : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __snake_case : Dict = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __snake_case : Union[str, Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) sd_pipe.set_scheduler('''sample_euler''' ) __snake_case : str = '''A painting of a squirrel eating a burger''' __snake_case : List[str] = torch.manual_seed(0 ) __snake_case : Dict = sd_pipe([prompt] , generator=a_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) __snake_case : Any = output.images __snake_case : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __snake_case : Tuple = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __snake_case : Union[str, Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) __snake_case : int = '''A painting of a squirrel eating a burger''' __snake_case : Optional[Any] = torch.manual_seed(0 ) __snake_case : Dict = sd_pipe( [prompt] , generator=a_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=a_ , ) __snake_case : Optional[int] = output.images __snake_case : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __snake_case : Any = np.array( [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
102
0
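The debug launcher above patches the rendezvous environment variables and then forks worker processes. A minimal, self-contained sketch of that pattern (Unix only, because of the fork start method; `patched_environment` and `worker` are illustrative names, not the sample's):

```python
import os
from contextlib import contextmanager

import torch.multiprocessing as mp


@contextmanager
def patched_environment(**kwargs):
    # Temporarily set environment variables, restoring the old values on exit.
    old = {key.upper(): os.environ.get(key.upper()) for key in kwargs}
    os.environ.update({key.upper(): str(value) for key, value in kwargs.items()})
    try:
        yield
    finally:
        for key, value in old.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value


def worker(rank):
    # start_processes passes the process index as the first positional argument.
    print(f"hello from rank {rank} of {os.environ['WORLD_SIZE']}")


if __name__ == "__main__":
    with patched_environment(world_size=2, master_addr="127.0.0.1", master_port=29500):
        mp.start_processes(worker, args=(), nprocs=2, start_method="fork")
```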
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict=13 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Optional[Any]=24 , _UpperCAmelCase : int=16 , _UpperCAmelCase : Any=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Any=32 , _UpperCAmelCase : str=5 , _UpperCAmelCase : str=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Union[str, Any]=2 , ): """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = patch_size UpperCAmelCase__ = max_length UpperCAmelCase__ = num_mel_bins UpperCAmelCase__ = is_training UpperCAmelCase__ = use_labels UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = type_sequence_label_size UpperCAmelCase__ = initializer_range UpperCAmelCase__ = scope UpperCAmelCase__ = frequency_stride UpperCAmelCase__ = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCAmelCase__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCAmelCase__ = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCAmelCase__ = frequency_out_dimension * time_out_dimension UpperCAmelCase__ = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = self.get_config() return config, input_values, labels def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , 
time_stride=self.time_stride , ) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = ASTModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) = config_and_inputs UpperCAmelCase__ = {"""input_values""": input_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Optional[int] = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) lowerCAmelCase_ : Dict = ( {"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel} if is_torch_available() else {} ) lowerCAmelCase_ : Tuple = False lowerCAmelCase_ : int = False lowerCAmelCase_ : Any = False lowerCAmelCase_ : str = False def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] ): """simple docstring""" if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = ASTModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""AST does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(_UpperCAmelCase ) UpperCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ = [*signature.parameters.keys()] UpperCAmelCase__ = ["""input_values"""] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = ASTModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = hf_hub_download( 
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" ) UpperCAmelCase__ , UpperCAmelCase__ = torchaudio.load(SCREAMING_SNAKE_CASE__ ) return audio, sampling_rate @require_torch @require_torchaudio class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return ( ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ) if is_torchaudio_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = self.default_feature_extractor UpperCAmelCase__ = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_UpperCAmelCase ) UpperCAmelCase__ = self.default_feature_extractor UpperCAmelCase__ , UpperCAmelCase__ = prepare_audio() UpperCAmelCase__ = audio.squeeze().numpy() UpperCAmelCase__ = feature_extractor(_UpperCAmelCase , sampling_rate=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): UpperCAmelCase__ = model(**_UpperCAmelCase ) # verify the logits UpperCAmelCase__ = torch.Size((1, 5_27) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) UpperCAmelCase__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
61
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
61
1
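The AST model tester above derives its expected sequence length from the spectrogram geometry. Restating that arithmetic as a standalone helper (the function name is illustrative), checked against the tester's default sizes:

```python
def ast_seq_length(num_mel_bins, max_length, patch_size, frequency_stride, time_stride):
    # Number of patch positions along the frequency and time axes.
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    # +2 for the [CLS] and distillation tokens that AST prepends.
    return frequency_out * time_out + 2


# With the tester's defaults (16 mel bins, max_length 24, patch size 2, strides 2):
assert ast_seq_length(16, 24, 2, 2, 2) == 8 * 12 + 2 == 98
```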
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : List[str] ={ '''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''], '''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple =[ '''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AdaptiveEmbedding''', '''TransfoXLForSequenceClassification''', '''TransfoXLLMHeadModel''', '''TransfoXLModel''', '''TransfoXLPreTrainedModel''', '''load_tf_weights_in_transfo_xl''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple =[ '''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFAdaptiveEmbedding''', '''TFTransfoXLForSequenceClassification''', '''TFTransfoXLLMHeadModel''', '''TFTransfoXLMainLayer''', '''TFTransfoXLModel''', '''TFTransfoXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys a__ : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
'''simple docstring'''

from __future__ import annotations

from typing import Any


class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    pass


class snake_case :
    """simple docstring"""

    def __init__( self : List[Any] , __A : Any ):
        __UpperCamelCase = data
        __UpperCamelCase = None

    def __iter__( self : Optional[Any] ):
        __UpperCamelCase = self
        __UpperCamelCase = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(__A )
            yield node.data
            __UpperCamelCase = node.next_node

    @property
    def _lowerCamelCase ( self : List[str] ):
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    a__ : Dict =Node(1)
    a__ : Optional[int] =Node(2)
    a__ : List[str] =Node(3)
    a__ : Optional[int] =Node(4)
    print(root_node.has_loop)  # False
    a__ : str =root_node.next_node
    print(root_node.has_loop)  # True

    a__ : Optional[int] =Node(5)
    a__ : List[Any] =Node(6)
    a__ : int =Node(5)
    a__ : Tuple =Node(6)
    print(root_node.has_loop)  # False

    a__ : str =Node(1)
    print(root_node.has_loop)  # False
53
1
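The loop-detection property in the linked-list sample above keeps a list of every visited node, which costs O(n) extra memory. Floyd's tortoise-and-hare traversal is a constant-memory alternative; the `Node` class below is a hypothetical stand-in for the sample's, and `has_loop` here is a free function rather than a property:

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.next_node = None


def has_loop(head):
    # Advance one pointer by one step and another by two; they can only meet
    # again if the list contains a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False


head = Node(1)
head.next_node = Node(2)
head.next_node.next_node = head  # close a cycle back to the head
assert has_loop(head)

head.next_node.next_node = None  # break the cycle
assert not has_loop(head)
```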
'''simple docstring''' import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class a ( unittest.TestCase ): def __init__( self , __magic_name__ , __magic_name__=1_00 , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.0_2 , __magic_name__=3 , ) -> Dict: _a = parent _a = vocab_size _a = batch_size _a = image_size _a = patch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _a = (image_size // patch_size) ** 2 _a = num_patches + 1 def __UpperCAmelCase ( self ) -> Union[str, Any]: _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, pixel_values, labels def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]: _a = FlaxBeitModel(config=SCREAMING_SNAKE_CASE_ ) _a = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]: _a = FlaxBeitForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ ) _a = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]: _a = self.type_sequence_label_size _a = FlaxBeitForImageClassification(config=SCREAMING_SNAKE_CASE_ ) _a = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _a = 1 _a = FlaxBeitForImageClassification(SCREAMING_SNAKE_CASE_ ) _a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self ) -> Union[str, Any]: _a = 
self.prepare_config_and_inputs() ( _a ) = config_and_inputs _a = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class a ( _lowerCAmelCase , unittest.TestCase ): _lowerCAmelCase = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def __UpperCAmelCase ( self ) -> None: _a = FlaxBeitModelTester(self ) _a = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def __UpperCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(SCREAMING_SNAKE_CASE_ ) _a = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self ) -> Optional[Any]: _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _a = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(__magic_name__ , **__magic_name__ ): return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest('JIT Enabled' ): _a = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _a = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCAmelCase ( self ) -> Any: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self ) -> Dict: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) @slow def __UpperCAmelCase ( self ) -> int: for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' ) _a = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _A () -> Any: '''simple docstring''' _a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @require_flax class a ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ) -> Optional[int]: return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def __UpperCAmelCase ( self ) -> int: _a = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' ).pixel_values # prepare bool_masked_pos _a = np.ones((1, 1_96) , dtype=SCREAMING_SNAKE_CASE_ ) # forward pass _a = model(pixel_values=SCREAMING_SNAKE_CASE_ , bool_masked_pos=SCREAMING_SNAKE_CASE_ ) _a = outputs.logits # verify the logits _a = (1, 1_96, 
81_92) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ ) _a = np.array( [[-3.2_4_3_7, 0.5_0_7_2, -13.91_74], [-3.2_4_5_6, 0.4_9_4_8, -13.94_01], [-3.2_0_3_3, 0.5_1_2_1, -13.85_50]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-2 ) ) @slow def __UpperCAmelCase ( self ) -> List[Any]: _a = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' ) # forward pass _a = model(**SCREAMING_SNAKE_CASE_ ) _a = outputs.logits # verify the logits _a = (1, 10_00) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ ) _a = np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ) self.assertTrue(np.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) _a = 2_81 self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE_ ) @slow def __UpperCAmelCase ( self ) -> Optional[Any]: _a = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' ) # forward pass _a = model(**SCREAMING_SNAKE_CASE_ ) _a = outputs.logits # verify the logits _a = (1, 2_18_41) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ ) _a = np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ) self.assertTrue(np.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) _a = 23_96 self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE_ )
366
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) a_ : Optional[int] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = ["DeiTFeatureExtractor"] a_ : List[Any] = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Tuple = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys a_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
104
0
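The Flax BeiT tests above compare jitted and non-jitted outputs. The same check on a toy pure function shows the shape of the pattern (requires jax installed; the function is arbitrary):

```python
import jax
import jax.numpy as jnp


def forward(x):
    # Any pure function of arrays works for this kind of consistency check.
    return jnp.tanh(x) * 2.0


jitted = jax.jit(forward)
x = jnp.ones((2, 3))

with jax.disable_jit():
    eager_out = jitted(x)  # runs the plain Python implementation

jit_out = jitted(x)  # compiled path
assert jit_out.shape == eager_out.shape
assert jnp.allclose(jit_out, eager_out)
```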
'''simple docstring''' from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ={ "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json", } class a__ ( UpperCAmelCase__ ): lowerCamelCase : str ="efficientnet" def __init__( self : Any , a : int = 3 , a : int = 6_00 , a : float = 2.0 , a : float = 3.1 , a : int = 8 , a : List[int] = [3, 3, 5, 3, 5, 5, 3] , a : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , a : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , a : List[int] = [] , a : List[int] = [1, 2, 2, 2, 1, 2, 1] , a : List[int] = [1, 2, 2, 3, 3, 4, 1] , a : List[int] = [1, 6, 6, 6, 6, 6, 6] , a : float = 0.25 , a : str = "swish" , a : int = 25_60 , a : str = "mean" , a : float = 0.02 , a : float = 0.0_01 , a : float = 0.99 , a : float = 0.5 , a : float = 0.2 , **a : Union[str, Any] , ): """simple docstring""" super().__init__(**a ) __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = width_coefficient __lowerCamelCase = depth_coefficient __lowerCamelCase = depth_divisor __lowerCamelCase = kernel_sizes __lowerCamelCase = in_channels __lowerCamelCase = out_channels __lowerCamelCase = depthwise_padding __lowerCamelCase = strides __lowerCamelCase = num_block_repeats __lowerCamelCase = expand_ratios __lowerCamelCase = squeeze_expansion_ratio __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dim __lowerCamelCase = pooling_type __lowerCamelCase = initializer_range __lowerCamelCase = batch_norm_eps __lowerCamelCase = batch_norm_momentum __lowerCamelCase = dropout_rate __lowerCamelCase = drop_connect_rate __lowerCamelCase = sum(a ) * 4 class a__ ( UpperCAmelCase__ ): lowerCamelCase : int =version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return 1e-5
67
'''simple docstring'''


def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]:
    __lowerCamelCase = []
    __lowerCamelCase = set({'''(''', '''[''', '''{'''} )
    __lowerCamelCase = set({''')''', ''']''', '''}'''} )
    __lowerCamelCase = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
    for i in range(len(UpperCamelCase__ ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(UpperCamelCase__ ) == 0 or (len(UpperCamelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(UpperCamelCase__ ) == 0


def __lowerCAmelCase ( ) -> str:
    __lowerCamelCase = input('''Enter sequence of brackets: ''' )
    if is_balanced(UpperCamelCase__ ):
        print(UpperCamelCase__ , '''is balanced''' )
    else:
        print(UpperCamelCase__ , '''is not balanced''' )


if __name__ == "__main__":
    main()
67
1
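The balanced-brackets function above pairs openers with closers through a stack. A compact restatement with a few cases that pin down the expected behaviour (the `is_balanced` name is illustrative, not the sample's obfuscated one):

```python
def is_balanced(s):
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    stack = []
    for char in s:
        if char in open_to_closed:
            stack.append(char)
        elif char in open_to_closed.values():
            # A closer must match the most recent unmatched opener.
            if not stack or open_to_closed[stack.pop()] != char:
                return False
    return not stack  # leftover openers mean the string is unbalanced


assert is_balanced("{[()]}")
assert not is_balanced("{[(])}")
assert not is_balanced("((")
```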
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCAmelCase = imread(R'digital_image_processing/image_data/lena_small.jpg') lowerCAmelCase = cvtColor(img, COLOR_BGR2GRAY) def _a ( ): """simple docstring""" lowercase__ = cn.convert_to_negative(SCREAMING_SNAKE_CASE ) # assert negative_img array for at least one True assert negative_img.any() def _a ( ): """simple docstring""" with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(SCREAMING_SNAKE_CASE , 1_10 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def _a ( ): """simple docstring""" lowercase__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _a ( ): """simple docstring""" lowercase__ = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowercase__ = canny.canny(SCREAMING_SNAKE_CASE ) # assert canny array for at least one True assert canny_array.any() def _a ( ): """simple docstring""" assert gg.gaussian_filter(SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all() def _a ( ): """simple docstring""" lowercase__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowercase__ = conv.img_convolve(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).astype(SCREAMING_SNAKE_CASE ) assert res.any() def _a ( ): """simple docstring""" assert med.median_filter(SCREAMING_SNAKE_CASE , 3 ).any() def _a ( ): """simple docstring""" lowercase__ , lowercase__ = sob.sobel_filter(SCREAMING_SNAKE_CASE ) assert grad.any() and theta.any() def _a ( ): """simple docstring""" lowercase__ = sp.make_sepia(SCREAMING_SNAKE_CASE , 20 ) assert sepia.all() def _a ( SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" lowercase__ = bs.Burkes(imread(SCREAMING_SNAKE_CASE , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _a ( SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" lowercase__ = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _a ( ): """simple docstring""" lowercase__ = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
lowercase__ = imread(SCREAMING_SNAKE_CASE , 0 ) # Test for get_neighbors_pixel function() return not None lowercase__ = 0 lowercase__ = 0 lowercase__ = image[x_coordinate][y_coordinate] lowercase__ = lbp.get_neighbors_pixel( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowercase__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowercase__ = lbp.local_binary_value(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert lbp_image.any()
354
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase = { 'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'], 'tokenization_luke': ['LukeTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST', 'LukeForEntityClassification', 'LukeForEntityPairClassification', 'LukeForEntitySpanClassification', 'LukeForMultipleChoice', 'LukeForQuestionAnswering', 'LukeForSequenceClassification', 'LukeForTokenClassification', 'LukeForMaskedLM', 'LukeModel', 'LukePreTrainedModel', ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
93
0
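Most assertions in the image-processing tests above only check that an output array is non-trivial. For the negative transform, the expected per-pixel behaviour is easy to verify directly with numpy (the function below is a stand-in for the library's, which is not shown here):

```python
import numpy as np


def convert_to_negative(img):
    # Invert 8-bit pixel intensities: 0 becomes 255, 255 becomes 0.
    return (255 - img).astype(np.uint8)


img = np.array([[0, 128, 255]], dtype=np.uint8)
assert (convert_to_negative(img) == np.array([[255, 127, 0]])).all()
```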
import os
from math import logaa


def lowercase__ ( __snake_case : str = "base_exp.txt" ):
    '''simple docstring'''
    UpperCAmelCase_ : float = 0
    UpperCAmelCase_ : Tuple = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) ):
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = list(map(__snake_case , line.split(',' ) ) )
        if x * logaa(__snake_case ) > largest:
            UpperCAmelCase_ : Union[str, Any] = x * logaa(__snake_case )
            UpperCAmelCase_ : Dict = i + 1
    return result


if __name__ == "__main__":
    print(solution())
29
def lowercase__ ( __snake_case : Dict ):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    UpperCAmelCase_ , UpperCAmelCase_ : Any = head.next, head
    while fast and fast.next:
        UpperCAmelCase_ : str = fast.next.next
        UpperCAmelCase_ : Union[str, Any] = slow.next

    UpperCAmelCase_ : int = slow.next
    UpperCAmelCase_ : List[Any] = None  # Don't forget here! But forget still works!
    # reverse the second part
    UpperCAmelCase_ : Tuple = None
    while second:
        UpperCAmelCase_ : int = second.next
        UpperCAmelCase_ : Any = node
        UpperCAmelCase_ : Optional[Any] = second
        UpperCAmelCase_ : Tuple = nxt

    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        UpperCAmelCase_ : Optional[Any] = node.next
        UpperCAmelCase_ : Dict = head.next
    return True


def lowercase__ ( __snake_case : Union[str, Any] ):
    '''simple docstring'''
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    UpperCAmelCase_ : Any = head
    while fast and fast.next:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = fast.next.next, slow.next

    # 2. Push the second half into the stack
    UpperCAmelCase_ : List[str] = [slow.val]
    while slow.next:
        UpperCAmelCase_ : List[str] = slow.next
        stack.append(slow.val )

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        UpperCAmelCase_ : int = cur.next

    return True


def lowercase__ ( __snake_case : Dict ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    UpperCAmelCase_ : Tuple = {}
    UpperCAmelCase_ : int = 0
    while head:
        if head.val in d:
            d[head.val].append(__snake_case )
        else:
            UpperCAmelCase_ : List[Any] = [pos]
        UpperCAmelCase_ : Any = head.next
        pos += 1
    UpperCAmelCase_ : Dict = pos - 1
    UpperCAmelCase_ : Optional[int] = 0
    for v in d.values():
        if len(__snake_case ) % 2 != 0:
            middle += 1
        else:
            UpperCAmelCase_ : int = 0
            for i in range(0 , len(__snake_case ) ):
                if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
29
1
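The `base_exp` solution above never materialises the huge powers: it compares `base**exp` values through `exp * log10(base)`. A small case verifies the identity, followed by the same scan applied to two rows in the puzzle's `base,exp` format (these two pairs come from the Project Euler 99 statement):

```python
from math import log10

# log10(base**exp) = exp * log10(base), so comparing the logs is the same as
# comparing the powers; a small case where both sides are computable:
assert (3 * log10(5) > 5 * log10(2)) == (5**3 > 2**5)

lines = ["519432,525806", "632382,518061"]
scores = []
for line in lines:
    base, exp = map(int, line.split(","))
    scores.append(exp * log10(base))
print(scores.index(max(scores)) + 1)  # 1-indexed line holding the largest power
```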
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pba import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase__ = '''.'''

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCAmelCase__ = [
    '''Assert''',
    '''AssignVariableOp''',
    '''EmptyTensorList''',
    '''MergeV2Checkpoints''',
    '''ReadVariableOp''',
    '''ResourceGather''',
    '''RestoreV2''',
    '''SaveV2''',
    '''ShardedFilename''',
    '''StatefulPartitionedCall''',
    '''StaticRegexFullMatch''',
    '''VarHandleOp''',
]


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """simple docstring"""
    lowercase__ : Tuple = SavedModel()
    lowercase__ : Optional[int] = []
    with open(os.path.join(lowerCamelCase__ , "utils" , "tf_ops" , "onnx.json" ) ) as f:
        lowercase__ : List[str] = json.load(lowerCamelCase__ )["opsets"]
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(lowerCamelCase__ )] )

    with open(lowerCamelCase__ , "rb" ) as f:
        saved_model.ParseFromString(f.read() )

    lowercase__ : Tuple = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )

    # Convert to list, sorted if you want
    lowercase__ : Optional[int] = sorted(lowerCamelCase__ )
    lowercase__ : str = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(lowerCamelCase__ )

    if strict and len(lowerCamelCase__ ) > 0:
        raise Exception(
            F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops )
        )
    elif len(lowerCamelCase__ ) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""" )
        print(*lowerCamelCase__ , sep="\n" )
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
    parser.add_argument(
        '''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
    )
    parser.add_argument(
        '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
    )
    parser.add_argument(
        '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
    )
    lowerCAmelCase__ = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
121
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''], '''tokenization_deberta''': ['''DebertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''DebertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DebertaForMaskedLM''', '''DebertaForQuestionAnswering''', '''DebertaForSequenceClassification''', '''DebertaForTokenClassification''', '''DebertaModel''', '''DebertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDebertaForMaskedLM''', '''TFDebertaForQuestionAnswering''', '''TFDebertaForSequenceClassification''', '''TFDebertaForTokenClassification''', '''TFDebertaModel''', '''TFDebertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
121
1
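Stripped of the SavedModel parsing, the ONNX-compatibility script above reduces to a set difference between the ops a graph uses and the ops the target opset (plus ignorable internal ops) supports. The same check on toy data (all op names here are made up):

```python
# Ops supported by the target ONNX opset, plus internal ops that are safe to ignore.
onnx_ops = {"MatMul", "Relu", "Add"}
internal_ops = {"Assert", "ReadVariableOp"}

# Ops actually used by the graph under inspection.
model_op_names = {"MatMul", "Relu", "ReadVariableOp", "FancyCustomOp"}

incompatible = sorted(op for op in model_op_names if op not in onnx_ops | internal_ops)
assert incompatible == ["FancyCustomOp"]
```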
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    lowercase__ = 0
    lowercase__ = len(lowerCamelCase_ )
    for i in range(n - 1 ):
        for j in range(i + 1 , lowerCamelCase_ ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def a ( lowerCamelCase_ ):
    '''simple docstring'''
    if len(lowerCamelCase_ ) <= 1:
        return arr, 0
    lowercase__ = len(lowerCamelCase_ ) // 2
    lowercase__ = arr[0:mid]
    lowercase__ = arr[mid:]
    lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
    lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
    lowercase__ , lowercase__ = _count_cross_inversions(lowerCamelCase_ , lowerCamelCase_ )
    lowercase__ = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def a ( lowerCamelCase_ , lowerCamelCase_ ):
    '''simple docstring'''
    lowercase__ = []
    lowercase__ = lowercase__ = lowercase__ = 0
    while i < len(lowerCamelCase_ ) and j < len(lowerCamelCase_ ):
        if p[i] > q[j]:
            # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(lowerCamelCase_ ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1

    if i < len(lowerCamelCase_ ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )

    return r, num_inversion


def a ( ):
    '''simple docstring'''
    lowercase__ = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    lowercase__ = count_inversions_bf(lowerCamelCase_ )
    lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , lowerCamelCase_ )

    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    lowercase__ = count_inversions_bf(lowerCamelCase_ )
    lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , lowerCamelCase_ )

    # an empty list should also have zero inversions
    lowercase__ = []
    lowercase__ = count_inversions_bf(lowerCamelCase_ )
    lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , lowerCamelCase_ )


if __name__ == "__main__":
    main()
207
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _UpperCAmelCase ( A__ ): """simple docstring""" def lowercase__ ( self : Any ): '''simple docstring''' lowercase__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCamelCase, '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(lowerCamelCase, '''num_attention_heads''' ) ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple, lowerCamelCase : str, lowerCamelCase : str=13, lowerCamelCase : Union[str, Any]=64, lowerCamelCase : str=3, lowerCamelCase : int=3, lowerCamelCase : Dict=2, lowerCamelCase : int=1, lowerCamelCase : Optional[Any]=16, lowerCamelCase : Dict=[128, 256, 384], lowerCamelCase : Tuple=[4, 6, 8], lowerCamelCase : Optional[Any]=[2, 3, 4], lowerCamelCase : str=[16, 16, 16], lowerCamelCase : Dict=0, lowerCamelCase : List[str]=[2, 2, 2], lowerCamelCase : str=[2, 2, 2], lowerCamelCase : List[Any]=0.02, lowerCamelCase : Any=True, lowerCamelCase : Tuple=True, lowerCamelCase : Optional[Any]=2, ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = num_channels lowercase__ = kernel_size lowercase__ = stride lowercase__ = padding lowercase__ = hidden_sizes lowercase__ = num_attention_heads lowercase__ = depths lowercase__ = key_dim lowercase__ = drop_path_rate lowercase__ = patch_size lowercase__ = attention_ratio lowercase__ = mlp_ratio lowercase__ = initializer_range lowercase__ = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] lowercase__ = is_training lowercase__ = use_labels lowercase__ = num_labels lowercase__ = initializer_range def lowercase__ ( self : Tuple ): '''simple docstring''' lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size], self.num_labels ) lowercase__ = self.get_config() return config, pixel_values, labels def lowercase__ ( self : List[str] ): '''simple docstring''' return LevitConfig( image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, ) def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : int ): 
'''simple docstring''' lowercase__ = LevitModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowercase__ = model(lowerCamelCase ) lowercase__ = (self.image_size, self.image_size) lowercase__ , lowercase__ = image_size[0], image_size[1] for _ in range(4 ): lowercase__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) lowercase__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), ) def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : List[Any], lowerCamelCase : List[Any] ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = LevitForImageClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowercase__ = model(lowerCamelCase, labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def lowercase__ ( self : int ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ): """simple docstring""" lowercase__ = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) lowercase__ = ( { """feature-extraction""": LevitModel, """image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def lowercase__ ( self : List[Any] ): '''simple docstring''' lowercase__ = LevitModelTester(self ) lowercase__ = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 ) def lowercase__ ( self : str ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase__ ( self : Tuple ): '''simple docstring''' return @unittest.skip(reason='''Levit does not use inputs_embeds''' ) def lowercase__ ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='''Levit does not support input and output embeddings''' ) def lowercase__ ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason='''Levit does not output attentions''' ) def lowercase__ ( self : Dict ): '''simple docstring''' pass def lowercase__ ( self : List[Any] ): '''simple docstring''' lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(lowerCamelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCamelCase ) def lowercase__ ( self : Tuple ): '''simple docstring''' def check_hidden_states_output(lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Tuple ): 
lowercase__ = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) ) lowercase__ = outputs.hidden_states lowercase__ = len(self.model_tester.depths ) + 1 self.assertEqual(len(lowerCamelCase ), lowerCamelCase ) lowercase__ = (self.model_tester.image_size, self.model_tester.image_size) lowercase__ , lowercase__ = image_size[0], image_size[1] for _ in range(4 ): lowercase__ = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) lowercase__ = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [ height * width, self.model_tester.hidden_sizes[0], ], ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase__ ( self : Optional[Any] ): '''simple docstring''' pass def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Any=False ): '''simple docstring''' lowercase__ = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowercase__ ( self : int ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def lowercase__ ( self : List[str] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) def lowercase__ ( self : Optional[Any] ): '''simple docstring''' if not self.model_tester.is_training: return lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowerCamelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue lowercase__ = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.train() lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase ) lowercase__ = model(**lowerCamelCase ).loss loss.backward() def lowercase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowercase__ = False lowercase__ = True for model_class in self.all_model_classes: if model_class in get_values(lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue lowercase__ = model_class(lowerCamelCase ) model.gradient_checkpointing_enable() 
model.to(lowerCamelCase ) model.train() lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase ) lowercase__ = model(**lowerCamelCase ).loss loss.backward() def lowercase__ ( self : List[str] ): '''simple docstring''' lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowerCamelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ): lowercase__ = problem_type['''title'''] lowercase__ = problem_type['''num_labels'''] lowercase__ = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.train() lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase ) if problem_type["num_labels"] > 1: lowercase__ = inputs['''labels'''].unsqueeze(1 ).repeat(1, problem_type['''num_labels'''] ) lowercase__ = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowerCamelCase ) as warning_list: lowercase__ = model(**lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def lowercase__ ( self : Optional[int] ): '''simple docstring''' for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = LevitModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def a ( ): '''simple docstring''' lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase__ ( self : int ): '''simple docstring''' return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowercase__ ( self : List[Any] ): '''simple docstring''' lowercase__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( lowerCamelCase ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) # forward pass with torch.no_grad(): lowercase__ = model(**lowerCamelCase ) # verify the logits lowercase__ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, lowerCamelCase ) lowercase__ = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
207
1
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _lowerCamelCase : Optional[int] = logging.get_logger(__name__) class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = ["""input_values""", """padding_mask"""] def __init__( self : int, __A : int = 1, __A : int = 2_4_0_0_0, __A : float = 0.0, __A : float = None, __A : float = None, **__A : int, ): super().__init__(feature_size=__A, sampling_rate=__A, padding_value=__A, **__A ) UpperCAmelCase : List[str] = chunk_length_s UpperCAmelCase : int = overlap @property def __magic_name__ ( self : List[str] ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __magic_name__ ( self : Tuple ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self : Dict, __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], __A : Optional[Union[bool, str, PaddingStrategy]] = None, __A : Optional[bool] = False, __A : Optional[int] = None, __A : Optional[Union[str, TensorType]] = None, __A : Optional[int] = None, ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if padding and truncation: raise ValueError('''Both padding and truncation were set. 
Make sure you only set one.''' ) elif padding is None: # by default let's pad the inputs UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = bool( isinstance(__A, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase : int = [np.asarray(__A, dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__A, np.ndarray ): UpperCAmelCase : List[Any] = np.asarray(__A, dtype=np.floataa ) elif isinstance(__A, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): UpperCAmelCase : Dict = raw_audio.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase : Union[str, Any] = [np.asarray(__A ).T] # verify inputs are valid for idx, example in enumerate(__A ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) UpperCAmelCase : Tuple = None UpperCAmelCase : str = BatchFeature({'''input_values''': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: UpperCAmelCase : Optional[Any] = min(array.shape[0] for array in raw_audio ) UpperCAmelCase : Tuple = int(np.floor(max_length / self.chunk_stride ) ) UpperCAmelCase : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: UpperCAmelCase : Union[str, Any] = max(array.shape[0] for array in raw_audio ) UpperCAmelCase : Union[str, Any] = int(np.ceil(max_length / self.chunk_stride ) ) UpperCAmelCase : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length UpperCAmelCase : Optional[int] = '''max_length''' else: UpperCAmelCase : str = input_values # normal padding on batch if padded_inputs is None: UpperCAmelCase : Optional[int] = self.pad( __A, max_length=__A, truncation=__A, padding=__A, return_attention_mask=__A, ) if padding: UpperCAmelCase : Optional[Any] = padded_inputs.pop('''attention_mask''' ) UpperCAmelCase : Dict = [] for example in padded_inputs.pop('''input_values''' ): if self.feature_size == 1: UpperCAmelCase : Optional[int] = example[..., None] input_values.append(example.T ) UpperCAmelCase : List[Any] = input_values if return_tensors is not None: UpperCAmelCase : List[Any] = padded_inputs.convert_to_tensors(__A ) return padded_inputs
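# A hedged usage sketch of the feature extractor above (it is EncodecFeatureExtractor
# in transformers); the audio here is random noise, purely for shape illustration.
import numpy as np

from transformers import EncodecFeatureExtractor

feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
raw_audio = np.random.randn(24_000).astype(np.float32)  # 1 s of mono audio

inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="np")
print(inputs["input_values"].shape)  # (batch, channels, samples), e.g. (1, 1, 24000)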
353
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: _lowerCamelCase : List[Any] = None _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : str = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _lowerCamelCase : str = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json" ), }, } _lowerCamelCase : Any = { "facebook/nllb-large-en-ro": 1_0_2_4, "facebook/nllb-200-distilled-600M": 1_0_2_4, } # fmt: off _lowerCamelCase : int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = VOCAB_FILES_NAMES 
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = ["""input_ids""", """attention_mask"""] UpperCamelCase = NllbTokenizer UpperCamelCase = [] UpperCamelCase = [] def __init__( self : Optional[Any], __A : Tuple=None, __A : int=None, __A : List[Any]="<s>", __A : Tuple="</s>", __A : Any="</s>", __A : Optional[Any]="<s>", __A : Tuple="<unk>", __A : str="<pad>", __A : Dict="<mask>", __A : Optional[Any]=None, __A : List[Any]=None, __A : List[Any]=None, __A : str=False, **__A : Tuple, ): # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase : int = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else mask_token UpperCAmelCase : str = legacy_behaviour super().__init__( vocab_file=__A, tokenizer_file=__A, bos_token=__A, eos_token=__A, sep_token=__A, cls_token=__A, unk_token=__A, pad_token=__A, mask_token=__A, src_lang=__A, tgt_lang=__A, additional_special_tokens=__A, legacy_behaviour=__A, **__A, ) UpperCAmelCase : Optional[int] = vocab_file UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True UpperCAmelCase : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) UpperCAmelCase : List[Any] = { lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase : List[Any] = src_lang if src_lang is not None else '''eng_Latn''' UpperCAmelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __magic_name__ ( self : Optional[int] ): return self._src_lang @src_lang.setter def __magic_name__ ( self : Union[str, Any], __A : str ): UpperCAmelCase : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __magic_name__ ( self : Any, __A : List[int], __A : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __magic_name__ ( self : Tuple, __A : List[int], __A : Optional[List[int]] = None ): UpperCAmelCase : Union[str, Any] = [self.sep_token_id] UpperCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __magic_name__ ( self : str, __A : Optional[int], __A : str, __A : Optional[str], __A : Optional[str], **__A : Union[str, Any] ): if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCAmelCase : Optional[Any] = src_lang UpperCAmelCase : Optional[Any] = self(__A, add_special_tokens=__A, return_tensors=__A, **__A ) UpperCAmelCase : Union[str, Any] = self.convert_tokens_to_ids(__A ) UpperCAmelCase : int = tgt_lang_id return inputs def __magic_name__ ( self : List[str], __A : List[str], __A : str = "eng_Latn", __A : Optional[List[str]] = None, __A : str = "fra_Latn", **__A : Tuple, ): UpperCAmelCase : Any = src_lang UpperCAmelCase : Tuple = tgt_lang return super().prepare_seqaseq_batch(__A, __A, **__A ) 
def __magic_name__ ( self : List[str] ): return self.set_src_lang_special_tokens(self.src_lang ) def __magic_name__ ( self : Optional[Any] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __magic_name__ ( self : Union[str, Any], __A : List[str] ): UpperCAmelCase : int = self.convert_tokens_to_ids(__A ) if self.legacy_behaviour: UpperCAmelCase : Tuple = [] UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase : Any = [self.cur_lang_code] UpperCAmelCase : Optional[Any] = [self.eos_token_id] UpperCAmelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase : int = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), ) def __magic_name__ ( self : Dict, __A : str ): UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(__A ) if self.legacy_behaviour: UpperCAmelCase : List[Any] = [] UpperCAmelCase : Any = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase : int = [self.cur_lang_code] UpperCAmelCase : List[Any] = [self.eos_token_id] UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase : List[str] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), ) def __magic_name__ ( self : Dict, __A : str, __A : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__A ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return UpperCAmelCase : Tuple = os.path.join( __A, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file, __A ) return (out_vocab_file,)
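# A hedged usage sketch for the fast NLLB tokenizer above: with the default
# (non-legacy) behaviour, source text is wrapped as [src_lang_code] ... [eos],
# while tgt_lang drives the decoder-side special tokens.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello world", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))
# e.g. ['eng_Latn', '▁Hello', '▁world', '</s>']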
99
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'WavLMForAudioFrameClassification', 'WavLMForCTC', 'WavLMForSequenceClassification', 'WavLMForXVector', 'WavLMModel', 'WavLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
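# The module above uses the _LazyModule pattern: importing the package records the
# import structure but defers heavy imports (torch) until a name is first accessed.
# A simplified stand-in for the idea, not the actual _LazyModule implementation:
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # only reached when the attribute is not cached yet
        module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value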
303
"""simple docstring""" from __future__ import annotations def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> list: __lowerCAmelCase : Dict = [] __lowerCAmelCase , __lowerCAmelCase : Any = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) __lowerCAmelCase : int = result + left + right return input_list def _lowercase ( __snake_case ) -> list: if len(__snake_case ) <= 1: return input_list __lowerCAmelCase : int = list(__snake_case ) # iteration for two-way merging __lowerCAmelCase : Optional[int] = 2 while p <= len(__snake_case ): # getting low, high and middle value for merge-sort of single list for i in range(0 ,len(__snake_case ) ,__snake_case ): __lowerCAmelCase : Union[str, Any] = i __lowerCAmelCase : Tuple = i + p - 1 __lowerCAmelCase : Optional[Any] = (low + high + 1) // 2 __lowerCAmelCase : Any = merge(__snake_case ,__snake_case ,__snake_case ,__snake_case ) # final merge of last two parts if p * 2 >= len(__snake_case ): __lowerCAmelCase : Optional[Any] = i __lowerCAmelCase : Union[str, Any] = merge(__snake_case ,0 ,__snake_case ,len(__snake_case ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip() if user_input == "": __snake_case : Optional[int] = [] else: __snake_case : int = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
269
0
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig lowerCAmelCase_ = { 'facebook/maskformer-swin-base-ade': ( 'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): _UpperCamelCase : int = '''maskformer''' _UpperCamelCase : Union[str, Any] = {'''hidden_size''': '''mask_feature_size'''} _UpperCamelCase : Dict = ['''resnet''', '''swin'''] _UpperCamelCase : Optional[int] = ['''detr'''] def __init__( self : Any , _A : int = 256 , _A : int = 256 , _A : float = 0.1 , _A : bool = False , _A : Optional[Dict] = None , _A : Optional[Dict] = None , _A : float = 0.02 , _A : float = 1.0 , _A : float = 1.0 , _A : float = 1.0 , _A : float = 20.0 , _A : Optional[bool] = None , **_A : str , ) -> str: """simple docstring""" if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k lowercase : List[str] = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(_A , _A ): lowercase : Optional[int] = backbone_config.pop('''model_type''' ) lowercase : List[str] = CONFIG_MAPPING[backbone_model_type] lowercase : Union[str, Any] = config_class.from_dict(_A ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
""" f"""Supported model types: {','.join(self.backbones_supported )}""" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 lowercase : Any = DetrConfig() else: # verify that the decoder is supported lowercase : Union[str, Any] = ( decoder_config.pop('''model_type''' ) if isinstance(_A , _A ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f"""Transformer Decoder {decoder_type} not supported, please use one of""" f""" {','.join(self.decoders_supported )}""" ) if isinstance(_A , _A ): lowercase : str = CONFIG_MAPPING[decoder_type] lowercase : Dict = config_class.from_dict(_A ) lowercase : Tuple = backbone_config lowercase : List[Any] = decoder_config # main feature dimension for the model lowercase : Optional[int] = fpn_feature_size lowercase : List[Any] = mask_feature_size # initializer lowercase : Union[str, Any] = init_std lowercase : Tuple = init_xavier_std # Hungarian matcher && loss lowercase : List[str] = cross_entropy_weight lowercase : int = dice_weight lowercase : List[Any] = mask_weight lowercase : Tuple = use_auxiliary_loss lowercase : Tuple = no_object_weight lowercase : int = output_auxiliary_logits lowercase : List[Any] = self.decoder_config.encoder_attention_heads lowercase : List[Any] = self.decoder_config.num_hidden_layers super().__init__(**_A ) @classmethod def __a ( cls : Optional[int] , _A : PretrainedConfig , _A : PretrainedConfig , **_A : str ) -> Any: """simple docstring""" return cls( backbone_config=_A , decoder_config=_A , **_A , ) def __a ( self : Optional[int] ) -> Dict[str, any]: """simple docstring""" lowercase : str = copy.deepcopy(self.__dict__ ) lowercase : Optional[Any] = self.backbone_config.to_dict() lowercase : List[Any] = self.decoder_config.to_dict() lowercase : Optional[int] = self.__class__.model_type return output
356
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): _UpperCamelCase : str = ['''input_values''', '''attention_mask'''] def __init__( self : Optional[Any] , _A : int = 1 , _A : int = 16_000 , _A : float = 0.0 , _A : bool = False , _A : int = 80 , _A : int = 16 , _A : int = 64 , _A : str = "hann_window" , _A : float = 1.0 , _A : float = 80 , _A : float = 7_600 , _A : float = 1E-10 , _A : int = 2 , _A : bool = True , **_A : int , ) -> Union[str, Any]: """simple docstring""" super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A ) lowercase : str = do_normalize lowercase : int = return_attention_mask lowercase : Union[str, Any] = num_mel_bins lowercase : Union[str, Any] = hop_length lowercase : Dict = win_length lowercase : Union[str, Any] = win_function lowercase : int = frame_signal_scale lowercase : Dict = fmin lowercase : Optional[Any] = fmax lowercase : str = mel_floor lowercase : Dict = reduction_factor lowercase : List[Any] = win_length * sampling_rate // 1_000 lowercase : Union[str, Any] = hop_length * sampling_rate // 1_000 lowercase : Optional[Any] = optimal_fft_length(self.sample_size ) lowercase : Dict = (self.n_fft // 2) + 1 lowercase : Any = window_function(window_length=self.sample_size , name=self.win_function , periodic=_A ) lowercase : Dict = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ) if frame_signal_scale != 1.0: warnings.warn( '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , ) if reduction_factor != 2.0: warnings.warn( '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]: """simple docstring""" if attention_mask is not None: lowercase : Optional[int] = np.array(_A , np.intaa ) lowercase : Dict = [] for vector, length in zip(_A , attention_mask.sum(-1 ) ): lowercase : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowercase : List[str] = padding_value normed_input_values.append(_A ) else: lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __a ( self : Any , _A : np.ndarray , ) -> np.ndarray: """simple docstring""" lowercase : Tuple = spectrogram( _A , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , ) return log_mel_spec.T def __call__( self : List[Any] , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Union[bool, str, PaddingStrategy] 
= False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , **_A : Tuple , ) -> BatchFeature: """simple docstring""" if audio is None and audio_target is None: raise ValueError('''You must provide either `audio` or `audio_target` values.''' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if audio is not None: lowercase : Any = self._process_audio( _A , _A , _A , _A , _A , _A , _A , _A , **_A , ) else: lowercase : Any = None if audio_target is not None: lowercase : Tuple = self._process_audio( _A , _A , _A , _A , _A , _A , _A , _A , **_A , ) if inputs is None: return inputs_target else: lowercase : Any = inputs_target['''input_values'''] lowercase : Dict = inputs_target.get('''attention_mask''' ) if decoder_attention_mask is not None: lowercase : Union[str, Any] = decoder_attention_mask return inputs def __a ( self : List[Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = False , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , **_A : Any , ) -> BatchFeature: """simple docstring""" lowercase : Optional[int] = isinstance(_A , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowercase : int = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase : Optional[int] = [np.asarray(_A , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_A , np.ndarray ): lowercase : List[str] = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): lowercase : List[str] = speech.astype(np.floataa ) # always return batch if not is_batched: lowercase : Union[str, Any] = [speech] # needed to make pad() work on spectrogram inputs lowercase : Any = self.feature_size # convert into correct format for padding if is_target: lowercase : int = [self._extract_mel_features(_A ) for waveform in speech] lowercase : Any = BatchFeature({'''input_values''': features} ) lowercase : Optional[Any] = self.num_mel_bins else: lowercase : Optional[Any] = BatchFeature({'''input_values''': speech} ) lowercase : Optional[int] = self.pad( _A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , ) lowercase : str = feature_size_hack # convert input values to correct format lowercase : List[Any] = padded_inputs['''input_values'''] if not isinstance(input_values[0] , np.ndarray ): lowercase : List[str] = [np.asarray(_A , dtype=np.floataa ) for array in input_values] elif ( not isinstance(_A , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): 
lowercase : List[str] = [array.astype(np.floataa ) for array in input_values] elif isinstance(_A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): lowercase : Optional[Any] = input_values.astype(np.floataa ) # convert attention_mask to correct format lowercase : int = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: lowercase : Union[str, Any] = [np.asarray(_A , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: lowercase : Any = ( attention_mask if self._get_padding_strategies(_A , max_length=_A ) is not PaddingStrategy.DO_NOT_PAD else None ) lowercase : Optional[int] = self.zero_mean_unit_var_norm( padded_inputs['''input_values'''] , attention_mask=_A , padding_value=self.padding_value ) if return_tensors is not None: lowercase : Tuple = padded_inputs.convert_to_tensors(_A ) return padded_inputs def __a ( self : Optional[Any] ) -> Dict[str, Any]: """simple docstring""" lowercase : Optional[Any] = super().to_dict() # Don't serialize these as they are derived from the other properties. lowercase : Optional[int] = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs'''] for name in names: if name in output: del output[name] return output
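# A hedged usage sketch of the extractor above (SpeechT5FeatureExtractor in
# transformers): plain `audio` yields normalized waveforms, while `audio_target`
# yields log-mel spectrograms for the TTS/voice-conversion target side.
import numpy as np

from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)  # 1 s at 16 kHz

inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)   # (1, 16000)
print(targets["input_values"].shape)  # (1, n_frames, 80) log-mel features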
116
0
'''simple docstring''' import os import string import sys UpperCamelCase__ : int = 1 << 8 UpperCamelCase__ : Tuple = { '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 27, '''up''': 65 + ARROW_KEY_FLAG, '''down''': 66 + ARROW_KEY_FLAG, '''right''': 67 + ARROW_KEY_FLAG, '''left''': 68 + ARROW_KEY_FLAG, '''mod_int''': 91, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 50, '''delete''': 51, '''pg_up''': 53, '''pg_down''': 54, } UpperCamelCase__ : str = KEYMAP['''up'''] UpperCamelCase__ : Any = KEYMAP['''left'''] if sys.platform == "win32": UpperCamelCase__ : Tuple = [] UpperCamelCase__ : List[str] = { B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(10): UpperCamelCase__ : Optional[Any] = ord(str(i)) def lowerCAmelCase_ ( ): if os.name == "nt": import msvcrt __SCREAMING_SNAKE_CASE : Dict = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(_lowerCamelCase ) == 0: # Read the keystroke __SCREAMING_SNAKE_CASE : str = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): __SCREAMING_SNAKE_CASE : List[Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: __SCREAMING_SNAKE_CASE : List[Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(_lowerCamelCase ) if ord(_lowerCamelCase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(1_26 ) ) __SCREAMING_SNAKE_CASE : List[str] = chr(KEYMAP["""esc"""] ) except KeyError: __SCREAMING_SNAKE_CASE : Dict = cha[1] else: __SCREAMING_SNAKE_CASE : Tuple = ch.decode(_lowerCamelCase ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty __SCREAMING_SNAKE_CASE : Optional[Any] = sys.stdin.fileno() __SCREAMING_SNAKE_CASE : int = termios.tcgetattr(_lowerCamelCase ) try: tty.setraw(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : Any = sys.stdin.read(1 ) finally: termios.tcsetattr(_lowerCamelCase , termios.TCSADRAIN , _lowerCamelCase ) return ch def lowerCAmelCase_ ( ): __SCREAMING_SNAKE_CASE : Dict = get_raw_chars() if ord(_lowerCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(_lowerCamelCase ) == KEYMAP["esc"]: __SCREAMING_SNAKE_CASE : List[Any] = get_raw_chars() if ord(_lowerCamelCase ) == KEYMAP["mod_int"]: __SCREAMING_SNAKE_CASE : str = get_raw_chars() if ord(_lowerCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_lowerCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(_lowerCamelCase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
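# The renaming hides the helpers' names; in the original accelerate source they are
# get_raw_chars() and get_character(). A speculative usage loop for the latter,
# which folds arrow-key escape sequences into single ARROW_KEY_FLAG-tagged chars:
while True:
    key = get_character()
    if key == KEYMAP["undefined"]:
        continue
    code = ord(key)
    if code == KEYMAP["interrupt"]:  # Ctrl-C
        break
    if code == KEYMAP["up"]:
        print("arrow up")
    elif code == KEYMAP["newline"]:
        print("enter")
    else:
        print(f"key: {key!r}")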
112
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
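# Both functions pop from the front of a list (queue.pop(0)), which is O(n) per pop;
# collections.deque gives the same traversal with O(1) pops. A sketch of the
# shortest-path variant rewritten around a deque:
from collections import deque


def bfs_shortest_path_deque(graph: dict, start, goal) -> list:
    if start == goal:
        return [start]
    explored = set()
    queue = deque([[start]])  # queue of partial paths
    while queue:
        path = queue.popleft()  # O(1) instead of list.pop(0)
        node = path[-1]
        if node in explored:
            continue
        for neighbour in graph[node]:
            new_path = path + [neighbour]
            if neighbour == goal:
                return new_path
            queue.append(new_path)
        explored.add(node)
    return []


assert bfs_shortest_path_deque(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]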
196
0
from PIL import Image


def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """simple docstring"""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
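# Worked numbers for level = 170, the value used in __main__: mid-grey (128) is a
# fixed point, and everything else is pushed hard toward the extremes. PIL's
# Image.point clamps the result to 0..255 for 8-bit bands.
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))  # ~= 4.85
print(int(128 + factor * (150 - 128)))  # 234: bright pixels get brighter
print(int(128 + factor * (100 - 128)))  # -7, clamped to 0 when applied via point()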
87
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy UpperCamelCase__ = logging.getLogger(__name__) UpperCamelCase__ = "pytorch_model.bin" @dataclasses.dataclass class __SCREAMING_SNAKE_CASE : snake_case : str = dataclasses.field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} ) snake_case : Optional[str] = dataclasses.field( default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , ) @dataclasses.dataclass class __SCREAMING_SNAKE_CASE : snake_case : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} ) snake_case : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} ) snake_case : Optional[str] = dataclasses.field( default=_a , metadata={"""help""": """A csv or a json file containing the validation data."""} ) snake_case : Optional[str] = dataclasses.field( default=_a , metadata={"""help""": """The name of the task to train on."""} , ) snake_case : Optional[List[str]] = dataclasses.field( default=_a , metadata={"""help""": """The list of labels for the task."""} ) @dataclasses.dataclass class __SCREAMING_SNAKE_CASE : snake_case : str = dataclasses.field( metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} ) snake_case : Optional[str] = dataclasses.field( default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} ) snake_case : Optional[str] = dataclasses.field( default="""no""" , metadata={ """help""": """The evaluation strategy to adopt during training. 
Possible values are: [\"no\", \"step\", \"epoch]""" } , ) snake_case : Optional[int] = dataclasses.field( default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , ) snake_case : Optional[float] = dataclasses.field( default=0.0 , metadata={ """help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions.""" } , ) snake_case : Optional[bool] = dataclasses.field( default=_a , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , ) snake_case : Optional[bool] = dataclasses.field( default=_a , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , ) snake_case : Optional[bool] = dataclasses.field( default=_a , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , ) snake_case : Optional[float] = dataclasses.field( default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , ) snake_case : Optional[int] = dataclasses.field( default=1_00 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , ) snake_case : Optional[int] = dataclasses.field( default=_a , metadata={"""help""": """Random seed for initialization."""} , ) def _UpperCamelCase (a__ :List[str] , a__ :str , a__ :Any , a__ :Any , a__ :List[str] , a__ :Union[str, Any] ): """simple docstring""" UpperCamelCase__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: UpperCamelCase__ = dataset.filter(lambda a__ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 UpperCamelCase__ = int(eval_result * len(a__ ) ) print(a__ ) UpperCamelCase__ = dataset.sort("""probability""" , reverse=a__ ) UpperCamelCase__ = dataset.select(range(a__ ) ) UpperCamelCase__ = dataset.remove_columns(["""label""", """probability"""] ) UpperCamelCase__ = dataset.rename_column("""prediction""" , """label""" ) UpperCamelCase__ = dataset.map(lambda a__ : {"label": idalabel[example["label"]]} ) UpperCamelCase__ = dataset.shuffle(seed=args.seed ) UpperCamelCase__ = os.path.join(a__ , f"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(a__ , index=a__ ) else: dataset.to_json(a__ ) def _UpperCamelCase (a__ :Union[str, Any] , a__ :Any , a__ :Optional[int] , a__ :Union[str, Any] , **a__ :Union[str, Any] ): """simple docstring""" UpperCamelCase__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() UpperCamelCase__ = STModelArguments(model_name_or_path=a__ ) UpperCamelCase__ = STDataArguments(train_file=a__ , infer_file=a__ ) UpperCamelCase__ = STTrainingArguments(output_dir=a__ ) UpperCamelCase__ = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(a__ ).items(): setattr(a__ , a__ , a__ ) for key, value in kwargs.items(): if hasattr(a__ , a__ ): setattr(a__ , a__ , a__ ) # Sanity checks UpperCamelCase__ = {} UpperCamelCase__ = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None UpperCamelCase__ = args.train_file UpperCamelCase__ = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None UpperCamelCase__ = args.eval_file for key in data_files: UpperCamelCase__ = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: UpperCamelCase__ = extension else: assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) UpperCamelCase__ = f"""{args.output_dir}/self-train_iter-{{}}""".format UpperCamelCase__ = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=a__ ) os.makedirs(a__ , exist_ok=a__ ) accelerator.wait_for_everyone() UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = 0 UpperCamelCase__ = False # Show the progress bar UpperCamelCase__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): UpperCamelCase__ = data_dir_format(a__ ) assert os.path.exists(a__ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 UpperCamelCase__ = os.path.join(a__ , """stage-1""" ) UpperCamelCase__ = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(a__ , a__ ): arguments_dict.update({key: value} ) 
UpperCamelCase__ = os.path.join(a__ , """best-checkpoint""" , a__ ) if os.path.exists(a__ ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , a__ , a__ , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , a__ ) finetune(**a__ ) accelerator.wait_for_everyone() assert os.path.exists(a__ ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , a__ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data UpperCamelCase__ = os.path.join(a__ , """best-checkpoint""" ) UpperCamelCase__ = os.path.join(a__ , """stage-2""" ) # Update arguments_dict UpperCamelCase__ = model_path UpperCamelCase__ = data_files["""train"""] UpperCamelCase__ = current_output_dir UpperCamelCase__ = os.path.join(a__ , """best-checkpoint""" , a__ ) if os.path.exists(a__ ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , a__ , a__ , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , a__ ) finetune(**a__ ) accelerator.wait_for_everyone() assert os.path.exists(a__ ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , a__ ) UpperCamelCase__ = iteration UpperCamelCase__ = data_dir_format(iteration + 1 ) UpperCamelCase__ = AutoConfig.from_pretrained(os.path.join(a__ , """best-checkpoint""" ) ) UpperCamelCase__ = config.idalabel UpperCamelCase__ = os.path.join(a__ , """eval_results_best-checkpoint.json""" ) UpperCamelCase__ = os.path.join(a__ , """test_results_best-checkpoint.json""" ) assert os.path.exists(a__ ) with open(a__ , """r""" ) as f: UpperCamelCase__ = float(json.load(a__ )[args.eval_metric] ) UpperCamelCase__ = os.path.join(a__ , """infer_output_best-checkpoint.csv""" ) assert os.path.exists(a__ ) # Loading the dataset from local csv or json files. 
UpperCamelCase__ = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""] UpperCamelCase__ = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(a__ , exist_ok=a__ ) shutil.copy(a__ , os.path.join(a__ , f"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(a__ ): shutil.copy(a__ , os.path.join(a__ , f"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(a__ , a__ , a__ , a__ , a__ , a__ ) accelerator.wait_for_everyone() UpperCamelCase__ = os.path.join(a__ , f"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: UpperCamelCase__ = eval_result if best_iteration is None: UpperCamelCase__ = new_iteration UpperCamelCase__ = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: UpperCamelCase__ = new_iteration UpperCamelCase__ = new_eval_result UpperCamelCase__ = 0 else: if new_eval_result == best_eval_result: UpperCamelCase__ = new_iteration UpperCamelCase__ = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: UpperCamelCase__ = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" , a__ ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , a__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(a__ , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(a__ , """eval_results_best-iteration.json""" ) , ) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , a__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(a__ , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(a__ , """eval_results_best-iteration.json""" ) , )
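# A speculative invocation sketch; the obfuscated entry point above corresponds to
# `selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs)` in
# the original research example, and the file paths/values here are illustrative.
selftrain(
    model_name_or_path="bert-base-uncased",
    train_file="data/train.csv",
    infer_file="data/infer.csv",
    output_dir="outputs/self-train",
    eval_file="data/eval.csv",
    evaluation_strategy="epoch",
    max_selftrain_iterations=3,
    do_filter_by_confidence=True,
    confidence_threshold=0.9,
)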
87
1
from __future__ import annotations __UpperCAmelCase = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ): SCREAMING_SNAKE_CASE_ = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) ) ] # the reference grid SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) ) ] # the action grid SCREAMING_SNAKE_CASE_ = init[0] SCREAMING_SNAKE_CASE_ = init[1] SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = g + heuristic[x][y] # cost from starting cell to destination cell SCREAMING_SNAKE_CASE_ = [[f, g, x, y]] SCREAMING_SNAKE_CASE_ = False # flag that is set when search is complete SCREAMING_SNAKE_CASE_ = False # flag set if we can't find expand while not found and not resign: if len(__lowerCamelCase ) == 0: raise ValueError('''Algorithm is unable to find solution''' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() SCREAMING_SNAKE_CASE_ = cell.pop() SCREAMING_SNAKE_CASE_ = next_cell[2] SCREAMING_SNAKE_CASE_ = next_cell[3] SCREAMING_SNAKE_CASE_ = next_cell[1] if x == goal[0] and y == goal[1]: SCREAMING_SNAKE_CASE_ = True else: for i in range(len(__lowerCamelCase ) ): # to try out different valid actions SCREAMING_SNAKE_CASE_ = x + DIRECTIONS[i][0] SCREAMING_SNAKE_CASE_ = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: SCREAMING_SNAKE_CASE_ = g + cost SCREAMING_SNAKE_CASE_ = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = i SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = goal[0] SCREAMING_SNAKE_CASE_ = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: SCREAMING_SNAKE_CASE_ = x - DIRECTIONS[action[x][y]][0] SCREAMING_SNAKE_CASE_ = y - DIRECTIONS[action[x][y]][1] SCREAMING_SNAKE_CASE_ = xa SCREAMING_SNAKE_CASE_ = ya invpath.append([x, y] ) SCREAMING_SNAKE_CASE_ = [] for i in range(len(__lowerCamelCase ) ): path.append(invpath[len(__lowerCamelCase ) - 1 - i] ) return path, action if __name__ == "__main__": __UpperCAmelCase = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] __UpperCAmelCase = [0, 0] # all coordinates are given in format [y,x] __UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1] __UpperCAmelCase = 1 # the cost map which pushes the path closer to the goal __UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): __UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map __UpperCAmelCase = 99 __UpperCAmelCase , __UpperCAmelCase = search(grid, init, goal, cost, heuristic) print("ACTION MAP") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
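# The search always expands the open cell with the smallest f = g + h, where g is
# the accumulated step cost and h the Manhattan-distance heuristic built in
# __main__ (obstacle cells get a 99 penalty). A minimal check of that heuristic:
goal = [4, 5]  # bottom-right of the 5x6 grid above
h = [[abs(i - goal[0]) + abs(j - goal[1]) for j in range(6)] for i in range(5)]
print(h[0][0])  # 9: the start is 4 rows + 5 columns away, so f(start) = 0 + 9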
299
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCAmelCase_ =["input_features", "is_longer"] def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict: super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , ) SCREAMING_SNAKE_CASE_ = top_db SCREAMING_SNAKE_CASE_ = truncation SCREAMING_SNAKE_CASE_ = padding SCREAMING_SNAKE_CASE_ = fft_window_size SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1 SCREAMING_SNAKE_CASE_ = hop_length SCREAMING_SNAKE_CASE_ = max_length_s SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate SCREAMING_SNAKE_CASE_ = sampling_rate SCREAMING_SNAKE_CASE_ = frequency_min SCREAMING_SNAKE_CASE_ = frequency_max SCREAMING_SNAKE_CASE_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , ) SCREAMING_SNAKE_CASE_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , ) def _UpperCamelCase ( self ) -> Dict[str, Any]: SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray: SCREAMING_SNAKE_CASE_ = spectrogram( _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , ) return log_mel_spectrogram.T def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE_ = [0] # randomly choose index for each part SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] ) SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] ) SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] ) SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :] SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :] SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :] SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] ) SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate( _A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A ) SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy() SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": SCREAMING_SNAKE_CASE_ = True # random crop to max_length (for 
compatibility) -> this should be handled by self.pad SCREAMING_SNAKE_CASE_ = len(_A ) - max_length SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 ) SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length] SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :] elif truncation == "fusion": SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters ) SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed SCREAMING_SNAKE_CASE_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 ) SCREAMING_SNAKE_CASE_ = False else: SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A ) SCREAMING_SNAKE_CASE_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: SCREAMING_SNAKE_CASE_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) ) SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) ) SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) ) SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters ) SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature: SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation SCREAMING_SNAKE_CASE_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) SCREAMING_SNAKE_CASE_ = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE_ = [np.asarray(_A )] # convert to mel spectrogram, truncate and pad if needed. SCREAMING_SNAKE_CASE_ = [ self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A ) for waveform in raw_speech ] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for mel, longer in padded_inputs: input_mel.append(_A ) is_longer.append(_A ) if truncation == "fusion" and sum(_A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) ) SCREAMING_SNAKE_CASE_ = True if isinstance(input_mel[0] , _A ): SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer] SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer} SCREAMING_SNAKE_CASE_ = BatchFeature(_A ) if return_tensors is not None: SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A ) return input_features
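# A hedged usage sketch of the extractor above (ClapFeatureExtractor in transformers).
# In "fusion" mode each clip becomes four stacked mel "views"; note the code above
# randomly flags one short clip as longer when nothing exceeds max length.
import numpy as np

from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()
audio = np.random.randn(3 * 48_000).astype(np.float32)  # 3 s at 48 kHz

inputs = extractor(
    audio, sampling_rate=48_000, truncation="fusion", padding="repeatpad", return_tensors="np"
)
print(inputs["input_features"].shape)  # (1, 4, n_frames, 64)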
299
1
from __future__ import annotations def UpperCamelCase ( __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ): snake_case : Optional[int] = [] snake_case , snake_case : int = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) snake_case : int = result + left + right return input_list def UpperCamelCase ( __lowerCamelCase : list ): if len(__lowerCamelCase ) <= 1: return input_list snake_case : Optional[int] = list(__lowerCamelCase ) # iteration for two-way merging snake_case : Optional[Any] = 2 while p <= len(__lowerCamelCase ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ): snake_case : int = i snake_case : Optional[Any] = i + p - 1 snake_case : Any = (low + high + 1) // 2 snake_case : List[str] = merge(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # final merge of last two parts if p * 2 >= len(__lowerCamelCase ): snake_case : List[str] = i snake_case : Optional[Any] = merge(__lowerCamelCase , 0 , __lowerCamelCase , len(__lowerCamelCase ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip() if user_input == "": __lowerCamelCase = [] else: __lowerCamelCase = [int(item.strip()) for item in user_input.split(""",""")] print(iter_merge_sort(unsorted))
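# A compact, hedged reference version of the bottom-up (iterative) merge sort
# implemented above, written with plain names so the control flow is easier to
# follow; `bottom_up_merge_sort` is an illustrative name, not part of the source.
def bottom_up_merge_sort(items: list) -> list:
    items = list(items)
    width = 1
    while width < len(items):
        # merge adjacent runs of length `width`
        for low in range(0, len(items), 2 * width):
            mid = min(low + width, len(items))
            high = min(low + 2 * width, len(items))
            left, right = items[low:mid], items[mid:high]
            merged = []
            while left and right:
                merged.append((left if left[0] <= right[0] else right).pop(0))
            items[low:high] = merged + left + right
        width *= 2
    return items

assert bottom_up_merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]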
10
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } __lowerCamelCase = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): for attribute in key.split("." ): snake_case : Tuple = getattr(__lowerCamelCase , __lowerCamelCase ) if weight_type is not None: snake_case : int = getattr(__lowerCamelCase , __lowerCamelCase ).shape else: snake_case : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case : Dict = value elif weight_type == "weight_g": snake_case : Optional[int] = value elif weight_type == "weight_v": snake_case : Optional[int] = value elif weight_type == "bias": snake_case : Tuple = value else: snake_case : Optional[int] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] ): snake_case : int = [] snake_case : List[Any] = fairseq_model.state_dict() snake_case : int = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): snake_case : List[str] = False if "conv_layers" in name: load_conv_layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) snake_case : str = True else: for key, mapped_key in MAPPING.items(): snake_case : Tuple = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." 
)[:-1] ) != key): # special case since naming is very similar continue snake_case : Tuple = True if "*" in mapped_key: snake_case : Union[str, Any] = name.split(__lowerCamelCase )[0].split("." )[-2] snake_case : Any = mapped_key.replace("*" , __lowerCamelCase ) if "weight_g" in name: snake_case : Optional[int] = "weight_g" elif "weight_v" in name: snake_case : Tuple = "weight_v" elif "bias" in name: snake_case : Dict = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case : str = "weight" else: snake_case : str = None set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) continue if not is_used: unused_weights.append(__lowerCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any ): snake_case : str = full_name.split("conv_layers." )[-1] snake_case : int = name.split("." ) snake_case : Optional[int] = int(items[0] ) snake_case : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case : Dict = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case : Optional[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__lowerCamelCase ) @torch.no_grad() def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=True ): if config_path is not None: snake_case : str = UniSpeechSatConfig.from_pretrained(__lowerCamelCase ) else: snake_case : str = UniSpeechSatConfig() snake_case : Tuple = "" if is_finetuned: snake_case : Tuple = UniSpeechSatForCTC(__lowerCamelCase ) else: snake_case : List[Any] = UniSpeechSatForPreTraining(__lowerCamelCase ) snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) snake_case : Dict = model[0].eval() 
recursively_load_weights(__lowerCamelCase , __lowerCamelCase ) hf_wavavec.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __lowerCamelCase = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
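# The conversion script above walks dotted key paths such as
# "encoder.layers.0.attention.k_proj" down a module tree before copying a weight
# in. A minimal sketch of that traversal pattern, assuming only plain attribute
# access; `set_by_path` is an illustrative helper, not part of the script.
def set_by_path(root, dotted_key: str, value):
    *parents, leaf = dotted_key.split(".")
    node = root
    for name in parents:
        node = getattr(node, name)  # descend one level per path segment
    setattr(node, leaf, value)

class _Node:  # tiny stand-in for a nested module hierarchy
    pass

model = _Node()
model.encoder = _Node()
set_by_path(model, "encoder.bias", 0.5)
assert model.encoder.bias == 0.5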
10
1
'''simple docstring''' def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" while b: SCREAMING_SNAKE_CASE : Dict = b, a % b return a def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return a if b == 0 else euclidean_gcd_recursive(__snake_case , a % b ) def __A ( ): """simple docstring""" print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' ) print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' ) print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' ) print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' ) print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' ) print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' ) print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' ) print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' ) print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' ) print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' ) if __name__ == "__main__": main()
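# A hedged, plainly-named sketch of the two Euclidean GCD variants above
# (iterative and recursive), with distinct parameter names so they run as written.
def euclid_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a

def euclid_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclid_gcd_recursive(b, a % b)

assert euclid_gcd(3, 6) == euclid_gcd_recursive(6, 3) == 3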
323
"""simple docstring""" def __magic_name__ ( __snake_case : list ) -> list: if len(__snake_case ) < 2: return collection def circle_sort_util(__snake_case : list , __snake_case : int , __snake_case : int ) -> bool: lowercase : List[Any] = False if low == high: return swapped lowercase : Union[str, Any] = low lowercase : str = high while left < right: if collection[left] > collection[right]: lowercase , lowercase : Optional[Any] = ( collection[right], collection[left], ) lowercase : Tuple = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowercase , lowercase : str = ( collection[right + 1], collection[left], ) lowercase : Union[str, Any] = True lowercase : Any = low + int((high - low) / 2 ) lowercase : Tuple = circle_sort_util(__snake_case , __snake_case , __snake_case ) lowercase : List[Any] = circle_sort_util(__snake_case , mid + 1 , __snake_case ) return swapped or left_swap or right_swap lowercase : int = True while is_not_sorted is True: lowercase : int = circle_sort_util(__snake_case , 0 , len(__snake_case ) - 1 ) return collection if __name__ == "__main__": _A : str = input("""Enter numbers separated by a comma:\n""").strip() _A : Dict = [int(item) for item in user_input.split(""",""")] print(circle_sort(unsorted))
202
0
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Optional[int] = XGLMTokenizer UpperCAmelCase : List[Any] = XGLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : List[str] = True def __snake_case ( self : Dict): super().setUp() # We have a SentencePiece fixture for testing a : Union[str, Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def __snake_case ( self : str): a : List[Any] = "<pad>" a : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase) def __snake_case ( self : Any): a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "<s>") self.assertEqual(vocab_keys[1] , "<pad>") self.assertEqual(len(__UpperCAmelCase) , 1008) def __snake_case ( self : Union[str, Any]): self.assertEqual(self.get_tokenizer().vocab_size , 1008) def __snake_case ( self : int): a : List[Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase) a : Dict = tokenizer.tokenize("This is a test") self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a : Dict = tokenizer.convert_tokens_to_ids(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a : Optional[Any] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def __snake_case ( self : int): return XGLMTokenizer.from_pretrained("facebook/xglm-564M") def __snake_case ( self : Any): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__UpperCAmelCase , f.name) a : Optional[Any] = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase) a : Tuple = pickle.dumps(__UpperCAmelCase) pickle.loads(__UpperCAmelCase) def __snake_case ( self : Optional[int]): if not self.test_rust_tokenizer: return a : Any = self.get_tokenizer() a : Tuple = self.get_rust_tokenizer() a : Tuple = "I was born in 92000, and this 
is falsé." a : Union[str, Any] = tokenizer.tokenize(__UpperCAmelCase) a : str = rust_tokenizer.tokenize(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) a : List[str] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) a : Union[str, Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) a : str = self.get_rust_tokenizer() a : int = tokenizer.encode(__UpperCAmelCase) a : Any = rust_tokenizer.encode(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) @slow def __snake_case ( self : Optional[int]): a : str = "Hello World!" a : Dict = [2, 31227, 4447, 35] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase)) @slow def __snake_case ( self : Any): a : List[Any] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a : Dict = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase)) @slow def __snake_case ( self : Optional[Any]): # fmt: off a : Optional[Any] = { "input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="facebook/xglm-564M" , padding=__UpperCAmelCase , )
226
"""simple docstring""" import sys import turtle def lowercase ( A_ , A_ )-> tuple[float, float]: '''simple docstring''' return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def lowercase ( A_ , A_ , A_ , A_ , )-> None: '''simple docstring''' my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(A_ , get_mid(A_ , A_ ) , get_mid(A_ , A_ ) , depth - 1 ) triangle(A_ , get_mid(A_ , A_ ) , get_mid(A_ , A_ ) , depth - 1 ) triangle(A_ , get_mid(A_ , A_ ) , get_mid(A_ , A_ ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( """Correct format for using this script: """ """python fractals.py <int:depth_for_fractal>""" ) __lowercase = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("""red""") __lowercase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
226
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE :Union[str, Any] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Dict = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
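# The module above defers heavy imports through transformers' _LazyModule. A
# hedged, dependency-free sketch of the same idea using importlib: exported
# attributes resolve to submodule members only on first access. All names here
# are illustrative, not the library's internals.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module(self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)

# e.g. LazyModule("pkg", {".configuration_unispeech": ["UniSpeechConfig"]})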
159
def _lowerCAmelCase ( lowerCAmelCase_ :int = 1_000 )->int: '''simple docstring''' snake_case_ , snake_case_ = 1, 1 snake_case_ = 2 while True: snake_case_ = 0 snake_case_ = fa + fa snake_case_ , snake_case_ = fa, f index += 1 for _ in str(lowerCAmelCase_ ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
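# A hedged, runnable restatement of the search above (Project Euler 25 style):
# find the index of the first Fibonacci term with at least n digits.
def first_fib_index_with_digits(n: int = 1000) -> int:
    fa, fb = 1, 1
    index = 2
    while len(str(fb)) < n:
        fa, fb = fb, fa + fb
        index += 1
    return index

assert first_fib_index_with_digits(3) == 12  # F(12) = 144 is the first 3-digit term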
159
1
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def UpperCamelCase ( _a , _a ) -> str: '''simple docstring''' lowercase_ :int = [] for part_id in partition_order: lowercase_ :List[Any] = df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect() for row_idx, row in enumerate(_a ): expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' lowercase_ :List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowercase_ :List[str] = spark.range(1_0_0 ).repartition(1 ) lowercase_ :List[str] = Spark(_a ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=1_6 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 5_0 @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase ( ) -> Union[str, Any]: '''simple docstring''' lowercase_ :List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowercase_ :Union[str, Any] = spark.range(1_0 ).repartition(2 ) lowercase_ :Dict = [1, 0] lowercase_ :Any = _generate_iterable_examples(_a , _a ) # Reverse the partitions. lowercase_ :Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , _a ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase_ :str = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase ( ) -> int: '''simple docstring''' lowercase_ :str = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowercase_ :Dict = spark.range(1_0 ).repartition(1 ) lowercase_ :List[str] = SparkExamplesIterable(_a ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(_a ): assert row_id == f"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase ( ) -> Optional[Any]: '''simple docstring''' lowercase_ :Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowercase_ :Dict = spark.range(3_0 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('''numpy.random.Generator''' ) as generator_mock: lowercase_ :Any = lambda _a : x.reverse() lowercase_ :int = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [2, 1, 0] ) lowercase_ :int = SparkExamplesIterable(_a ).shuffle_data_sources(_a ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(_a ): lowercase_ :int = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase ( ) -> Any: '''simple docstring''' lowercase_ :List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowercase_ :Optional[int] = spark.range(2_0 ).repartition(4 ) # Partitions 0 and 2 lowercase_ :Dict = SparkExamplesIterable(_a ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase_ :Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [0, 2] ) for i, (row_id, row_dict) in enumerate(_a ): lowercase_ :str = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase_ :Optional[int] = SparkExamplesIterable(_a ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase_ :Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [1, 3] ) for i, (row_id, row_dict) in enumerate(_a ): lowercase_ :Union[str, Any] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' lowercase_ :List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowercase_ :int = spark.range(1_0_0 ).repartition(1 ) lowercase_ :Union[str, Any] = Spark(_a ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
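# The sharding test above assigns Spark partitions to dataloader workers. A
# hedged sketch of the round-robin assignment it expects: worker w out of n
# receives partitions w, w+n, w+2n, ...  (`shard_partitions` is illustrative.)
def shard_partitions(num_partitions: int, worker_id: int, num_workers: int) -> list:
    return list(range(worker_id, num_partitions, num_workers))

assert shard_partitions(4, 0, 2) == [0, 2]
assert shard_partitions(4, 1, 2) == [1, 3]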
354
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler SCREAMING_SNAKE_CASE : Dict = 16 SCREAMING_SNAKE_CASE : str = 32 def UpperCamelCase ( _a ) -> Any: '''simple docstring''' return int(x / 2**2_0 ) class UpperCamelCase : '''simple docstring''' def __enter__( self ): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowercase_ :List[str] = torch.cuda.memory_allocated() return self def __exit__( self , *UpperCamelCase_ ): gc.collect() torch.cuda.empty_cache() lowercase_ :Any = torch.cuda.memory_allocated() lowercase_ :Union[str, Any] = torch.cuda.max_memory_allocated() lowercase_ :Optional[int] = bamb(self.end - self.begin ) lowercase_ :List[str] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def UpperCamelCase ( _a , _a = 1_6 , _a = "bert-base-cased" , _a = 3_2_0 , _a = 1_6_0 , ) -> Optional[Any]: '''simple docstring''' lowercase_ :Optional[Any] = AutoTokenizer.from_pretrained(_a ) lowercase_ :int = load_dataset( '''glue''' , '''mrpc''' , split={'''train''': f"train[:{n_train}]", '''validation''': f"validation[:{n_val}]"} ) def tokenize_function(_a ): # max_length=None => use the model max length (it's actually the default) lowercase_ :Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_a , max_length=_a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase_ :Tuple = datasets.map( _a , batched=_a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_a ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ :int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_a ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_a , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(_a , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
lowercase_ :Union[str, Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=_a , collate_fn=_a , batch_size=_a ) lowercase_ :str = DataLoader( tokenized_datasets['''validation'''] , shuffle=_a , collate_fn=_a , batch_size=_a ) return train_dataloader, eval_dataloader def UpperCamelCase ( _a , _a ) -> List[Any]: '''simple docstring''' lowercase_ :Tuple = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ :Dict = config['''lr'''] lowercase_ :List[Any] = int(config['''num_epochs'''] ) lowercase_ :Tuple = int(config['''seed'''] ) lowercase_ :List[str] = int(config['''batch_size'''] ) lowercase_ :Optional[Any] = args.model_name_or_path set_seed(_a ) lowercase_ , lowercase_ :Any = get_dataloaders(_a , _a , _a , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ :Tuple = AutoModelForSequenceClassification.from_pretrained(_a , return_dict=_a ) # Instantiate optimizer lowercase_ :Any = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase_ :str = optimizer_cls(params=model.parameters() , lr=_a ) if accelerator.state.deepspeed_plugin is not None: lowercase_ :str = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: lowercase_ :List[str] = 1 lowercase_ :Union[str, Any] = (len(_a ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase_ :int = get_linear_schedule_with_warmup( optimizer=_a , num_warmup_steps=0 , num_training_steps=_a , ) else: lowercase_ :str = DummyScheduler(_a , total_num_steps=_a , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ :Optional[Any] = accelerator.prepare( _a , _a , _a , _a , _a ) # We need to keep track of how many total steps we have iterated over lowercase_ :Dict = 0 # We also need to keep track of the stating epoch so files are named properly lowercase_ :int = 0 # Now we train the model lowercase_ :str = {} for epoch in range(_a , _a ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_a ): lowercase_ :Optional[Any] = model(**_a ) lowercase_ :Dict = outputs.loss lowercase_ :Dict = loss / gradient_accumulation_steps accelerator.backward(_a ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) ) accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) ) accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) ) accelerator.print( '''Total Peak Memory consumed during the train (max): {}'''.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowercase_ :Union[str, Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f: json.dump(_a , _a ) def UpperCamelCase ( ) -> Union[str, Any]: '''simple docstring''' lowercase_ :List[str] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=_a , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_a , ) parser.add_argument( '''--output_dir''' , type=_a , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--peak_memory_upper_bound''' , type=_a , default=_a , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , ) parser.add_argument( '''--n_train''' , type=_a , default=3_2_0 , help='''Number of training examples to use.''' , ) parser.add_argument( '''--n_val''' , type=_a , default=1_6_0 , help='''Number of validation examples to use.''' , ) parser.add_argument( '''--num_epochs''' , type=_a , default=1 , help='''Number of train epochs.''' , ) lowercase_ :Dict = parser.parse_args() lowercase_ :Dict = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6} training_function(_a , _a ) if __name__ == "__main__": main()
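# A hedged, minimal version of the memory-tracing context manager used above:
# it records allocated CUDA memory on entry and reports the delta and the peak
# on exit, in MiB. Assumes a CUDA-capable torch build; on CPU-only machines the
# cuda calls below would need guarding with torch.cuda.is_available().
import gc
import torch

class CudaMemTrace:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()  # zero the peak gauge
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        self.used = (torch.cuda.memory_allocated() - self.begin) // 2**20
        self.peaked = (torch.cuda.max_memory_allocated() - self.begin) // 2**20

# with CudaMemTrace() as t: train_one_epoch(...); print(t.used, t.peaked)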
252
0
import string def A_ ( snake_case : str ) -> None: '''simple docstring''' for key in range(len(string.ascii_uppercase ) ): __UpperCamelCase = '''''' for symbol in message: if symbol in string.ascii_uppercase: __UpperCamelCase = string.ascii_uppercase.find(snake_case ) __UpperCamelCase = num - key if num < 0: __UpperCamelCase = num + len(string.ascii_uppercase ) __UpperCamelCase = translated + string.ascii_uppercase[num] else: __UpperCamelCase = translated + symbol print(f"Decryption using Key #{key}: {translated}" ) def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = input('''Encrypted message: ''' ) __UpperCamelCase = message.upper() decrypt(snake_case ) if __name__ == "__main__": import doctest doctest.testmod() main()
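# A hedged sketch of the single-key Caesar shift underlying the brute-force
# loop above, using modular arithmetic instead of the explicit wrap-around.
import string

def caesar_shift(message: str, key: int) -> str:
    out = []
    for ch in message.upper():
        if ch in string.ascii_uppercase:
            shifted = (string.ascii_uppercase.index(ch) - key) % 26
            out.append(string.ascii_uppercase[shifted])
        else:
            out.append(ch)  # leave punctuation and spaces untouched
    return "".join(out)

assert caesar_shift("KHOOR", 3) == "HELLO"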
328
from __future__ import annotations import math def A_ ( snake_case : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True lowercase__ : int = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def A_ ( snake_case : int ) -> list[int]: '''simple docstring''' if not isinstance(snake_case , snake_case ): raise ValueError('''n must be an integer''' ) if n <= 0: raise ValueError('''n must be >= 0''' ) __UpperCamelCase = [] for num in range(len(snake_case ) ): __UpperCamelCase = 0 while 2 * i * i <= odd_composites[num]: __UpperCamelCase = odd_composites[num] - 2 * i * i if is_prime(snake_case ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case ) == n: return list_nums return [] def A_ ( ) -> int: '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F"{solution() = }")
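# A hedged restatement of the property tested above (Project Euler 46): an odd
# composite n satisfies Goldbach's other conjecture if n = prime + 2*i*i for
# some i >= 1. `is_prime` is assumed to be any correct primality test.
def satisfies_goldbach_other(n: int, is_prime) -> bool:
    i = 1
    while 2 * i * i <= n:
        if is_prime(n - 2 * i * i):
            return True
        i += 1
    return False

def _is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))

assert satisfies_goldbach_other(9, _is_prime)         # 9 = 7 + 2*1*1
assert not satisfies_goldbach_other(5777, _is_prime)  # the known counterexample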
328
1
__A : Optional[int] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' __A : Any = [{'type': 'code', 'content': INSTALL_CONTENT}] __A : List[str] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
356
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
57
0
'''simple docstring''' def UpperCamelCase_( snake_case : str ): '''simple docstring''' return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") ) def UpperCamelCase_( snake_case : str ): '''simple docstring''' snake_case_ = credit_card_number snake_case_ = 0 snake_case_ = len(snake_case ) - 2 for i in range(snake_case , -1 , -2 ): # double the value of every second digit snake_case_ = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 1_0 digit += 1 snake_case_ = cc_number[:i] + str(snake_case ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(snake_case ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 1_0 == 0 def UpperCamelCase_( snake_case : str ): '''simple docstring''' snake_case_ = f'{credit_card_number} is an invalid credit card number because' if not credit_card_number.isdigit(): print(f'{error_message} it has nonnumerical characters.' ) return False if not 1_3 <= len(snake_case ) <= 1_6: print(f'{error_message} of its length.' ) return False if not validate_initial_digits(snake_case ): print(f'{error_message} of its first two digits.' ) return False if not luhn_validation(snake_case ): print(f'{error_message} it fails the Luhn check.' ) return False print(f'{credit_card_number} is a valid credit card number.' ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("4111111111111111") validate_credit_card_number("32323")
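# A compact, hedged restatement of the Luhn checksum implemented above: double
# every second digit from the right, fold two-digit products back to one digit,
# and require the total to be divisible by 10.
def luhn_ok(number: str) -> bool:
    total = 0
    for pos, ch in enumerate(reversed(number)):
        d = int(ch)
        if pos % 2 == 1:  # every second digit from the right
            d *= 2
            if d > 9:
                d -= 9    # same as summing the product's two digits
        total += d
    return total % 10 == 0

assert luhn_ok("4111111111111111")
assert not luhn_ok("32323")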
85
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _SCREAMING_SNAKE_CASE : Union[str, Any] = { "tokenizer_file": { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json", }, } _SCREAMING_SNAKE_CASE : int = { "gpt-neox-20b": 2048, } class _snake_case ( lowercase_ ): lowerCAmelCase_ : str = VOCAB_FILES_NAMES lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ : str = ["input_ids", "attention_mask"] def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple: '''simple docstring''' super().__init__( a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space: snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) ) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**a__ ) snake_case_ = add_prefix_space def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]: '''simple docstring''' snake_case_ = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ ) def lowerCAmelCase__ ( self , a__ ) -> List[int]: '''simple docstring''' snake_case_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] ) if len(a__ ) > self.model_max_length: snake_case_ = input_ids[-self.model_max_length :] return input_ids
85
1
from ..utils import DummyObject, requires_backends class UpperCAmelCase ( metaclass=__A ): '''simple docstring''' lowerCamelCase_ = ['''torch''', '''torchsde'''] def __init__( self , *lowercase , **lowercase ): """simple docstring""" requires_backends(self , ['torch', 'torchsde'] ) @classmethod def lowerCAmelCase_ ( cls , *lowercase , **lowercase ): """simple docstring""" requires_backends(cls , ['torch', 'torchsde'] ) @classmethod def lowerCAmelCase_ ( cls , *lowercase , **lowercase ): """simple docstring""" requires_backends(cls , ['torch', 'torchsde'] )
360
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCAmelCase = { """configuration_autoformer""": [ """AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AutoformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase = [ """AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """AutoformerForPrediction""", """AutoformerModel""", """AutoformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys _UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
192
0
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case__ ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self : List[str] ) ->Union[str, Any]: snake_case__ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) snake_case__ : int = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__lowerCAmelCase ) snake_case__ : Union[str, Any] = -1 snake_case__ : Dict = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) snake_case__ : int = model.generate(__lowerCAmelCase, max_new_tokens=1_0, do_sample=__lowerCAmelCase ) snake_case__ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: snake_case__ : List[str] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase, max_new_tokens=1_0, do_sample=__lowerCAmelCase, streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer snake_case__ : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase, __lowerCAmelCase ) def lowercase_ ( self : Dict ) ->Union[str, Any]: snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) snake_case__ : Optional[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__lowerCAmelCase ) snake_case__ : Optional[Any] = -1 snake_case__ : List[str] = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) snake_case__ : Tuple = model.generate(__lowerCAmelCase, max_new_tokens=1_0, do_sample=__lowerCAmelCase ) snake_case__ : Optional[int] = tokenizer.decode(greedy_ids[0] ) snake_case__ : Optional[int] = TextIteratorStreamer(__lowerCAmelCase ) snake_case__ : Optional[int] = {'input_ids': input_ids, 'max_new_tokens': 1_0, 'do_sample': False, 'streamer': streamer} snake_case__ : List[str] = Thread(target=model.generate, kwargs=__lowerCAmelCase ) thread.start() snake_case__ : Union[str, Any] = '' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase, __lowerCAmelCase ) def lowercase_ ( self : Tuple ) ->Union[str, Any]: snake_case__ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) snake_case__ : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__lowerCAmelCase ) snake_case__ : Optional[int] = -1 snake_case__ : List[str] = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) snake_case__ : List[Any] = model.generate(__lowerCAmelCase, max_new_tokens=1_0, do_sample=__lowerCAmelCase ) snake_case__ : Union[str, Any] = greedy_ids[:, input_ids.shape[1] :] snake_case__ : Any = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: snake_case__ : str = TextStreamer(__lowerCAmelCase, skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase, max_new_tokens=1_0, do_sample=__lowerCAmelCase, streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer snake_case__ : Optional[int] = cs.out[:-1] self.assertEqual(__lowerCAmelCase, __lowerCAmelCase ) def lowercase_ ( self : str ) ->Union[str, Any]: snake_case__ : Any = 
AutoTokenizer.from_pretrained('distilgpt2' ) snake_case__ : Any = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(__lowerCAmelCase ) snake_case__ : Optional[int] = -1 snake_case__ : Union[str, Any] = torch.ones((1, 5), device=__lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: snake_case__ : Optional[int] = TextStreamer(__lowerCAmelCase, skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase, max_new_tokens=1, do_sample=__lowerCAmelCase, streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token snake_case__ : List[Any] = cs.out[:-1] # Remove the final "\n" snake_case__ : Any = tokenizer(__lowerCAmelCase, return_tensors='pt' ) self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1) ) def lowercase_ ( self : Tuple ) ->Optional[int]: snake_case__ : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) snake_case__ : Tuple = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__lowerCAmelCase ) snake_case__ : List[Any] = -1 snake_case__ : Any = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) snake_case__ : str = TextIteratorStreamer(__lowerCAmelCase, timeout=0.0_0_1 ) snake_case__ : List[Any] = {'input_ids': input_ids, 'max_new_tokens': 1_0, 'do_sample': False, 'streamer': streamer} snake_case__ : Dict = Thread(target=model.generate, kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): snake_case__ : List[str] = '' for new_text in streamer: streamer_text += new_text
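# A hedged sketch of the streaming-generation pattern exercised above: generation
# runs in a background thread while the main thread consumes text chunks from the
# iterator. The checkpoint name is a placeholder for any causal LM.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

def stream_generate(checkpoint: str, prompt: str) -> str:
    tok = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint)
    streamer = TextIteratorStreamer(tok, skip_prompt=True)
    inputs = tok(prompt, return_tensors="pt")
    kwargs = {**inputs, "max_new_tokens": 20, "streamer": streamer}
    Thread(target=model.generate, kwargs=kwargs).start()
    return "".join(chunk for chunk in streamer)  # blocks until generation ends

# e.g. stream_generate("hf-internal-testing/tiny-random-gpt2", "Hello")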
277
def lowerCAmelCase__(__snake_case ) -> list: '''simple docstring''' lowerCamelCase__ = len(__snake_case ) for _ in range(__snake_case ): for i in range(_ % 2 ,arr_size - 1 ,2 ): if arr[i + 1] < arr[i]: lowerCamelCase__ , lowerCamelCase__ = arr[i + 1], arr[i] return arr if __name__ == "__main__": _a = list(range(10, 0, -1)) print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
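# A hedged, runnable restatement of the odd-even transposition sort above:
# alternate passes over odd- and even-indexed pairs until the list is sorted
# (n phases always suffice for a list of length n).
def odd_even_sort(arr: list) -> list:
    arr = list(arr)
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_sort([10, 9, 8, 7]) == [7, 8, 9, 10]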
209
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class UpperCAmelCase_ ( A_ ): def __init__( self : str , *snake_case_ : List[Any] , **snake_case_ : Optional[Any] ) -> None: '''simple docstring''' warnings.warn( "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use CLIPImageProcessor instead." , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
230
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json", # See all DPT models at https://huggingface.co/models?filter=dpt } class UpperCAmelCase_ ( A_ ): lowercase__ = '''dpt''' def __init__( self : List[Any] , snake_case_ : Union[str, Any]=768 , snake_case_ : Tuple=12 , snake_case_ : Tuple=12 , snake_case_ : List[Any]=3_072 , snake_case_ : Dict="gelu" , snake_case_ : Tuple=0.0 , snake_case_ : int=0.0 , snake_case_ : Optional[int]=0.02 , snake_case_ : Union[str, Any]=1e-12 , snake_case_ : Tuple=384 , snake_case_ : Tuple=16 , snake_case_ : Optional[Any]=3 , snake_case_ : Dict=False , snake_case_ : Any=True , snake_case_ : Any=[2, 5, 8, 11] , snake_case_ : Union[str, Any]="project" , snake_case_ : Union[str, Any]=[4, 2, 1, 0.5] , snake_case_ : List[str]=[96, 192, 384, 768] , snake_case_ : int=256 , snake_case_ : Tuple=-1 , snake_case_ : List[str]=False , snake_case_ : int=True , snake_case_ : List[Any]=0.4 , snake_case_ : Optional[Any]=255 , snake_case_ : List[str]=0.1 , snake_case_ : List[str]=[1, 1_024, 24, 24] , snake_case_ : Union[str, Any]=[0, 1] , snake_case_ : Any=None , **snake_case_ : Optional[Any] , ) -> Optional[int]: '''simple docstring''' super().__init__(**snake_case_ ) A__ = hidden_size A__ = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("Initializing the config with a `BiT` backbone." ) A__ = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } A__ = BitConfig(**snake_case_ ) elif isinstance(snake_case_ , snake_case_ ): logger.info("Initializing the config with a `BiT` backbone." ) A__ = BitConfig(**snake_case_ ) elif isinstance(snake_case_ , snake_case_ ): A__ = backbone_config else: raise ValueError( F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" ) A__ = backbone_featmap_shape A__ = neck_ignore_stages if readout_type != "project": raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." ) else: A__ = None A__ = None A__ = [] A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = layer_norm_eps A__ = image_size A__ = patch_size A__ = num_channels A__ = qkv_bias A__ = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" ) A__ = readout_type A__ = reassemble_factors A__ = neck_hidden_sizes A__ = fusion_hidden_size A__ = head_in_index A__ = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A__ = use_auxiliary_head A__ = auxiliary_loss_weight A__ = semantic_loss_ignore_index A__ = semantic_classifier_dropout def __magic_name__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A__ = self.backbone_config.to_dict() A__ = self.__class__.model_type return output
230
1
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase_ ( _lowerCamelCase : Features): lowercase__ : List[Any] = np.inf def set_batch_size(_lowerCamelCase : FeatureType) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase): lowercase__ : Any = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS) elif isinstance(_lowerCamelCase , _lowerCamelCase): lowercase__ : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS) elif isinstance(_lowerCamelCase , _lowerCamelCase) and feature.dtype == "binary": lowercase__ : Dict = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS) _visit(_lowerCamelCase , _lowerCamelCase) return None if batch_size is np.inf else batch_size class snake_case_ ( __A ): def __init__( self : List[str] , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Union[str, Any] , ) -> Union[str, Any]: super().__init__( lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , ) lowercase__ : List[str] = path_or_paths if isinstance(lowercase_ , lowercase_ ) else {self.split: path_or_paths} lowercase__ : Optional[int] = _PACKAGED_DATASETS_MODULES["parquet"][1] lowercase__ : Union[str, Any] = Parquet( cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , ) def __UpperCamelCase ( self : Tuple ) -> Dict: # Build iterable dataset if self.streaming: lowercase__ : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ : Any = None lowercase__ : int = None lowercase__ : Dict = None lowercase__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , ) lowercase__ : Optional[Any] = self.builder.as_dataset( split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory ) return dataset class snake_case_ : def __init__( self : str , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Optional[Any] , ) -> int: lowercase__ : Dict = dataset lowercase__ : List[Any] = path_or_buf lowercase__ : Any = batch_size or get_writer_batch_size(dataset.features ) lowercase__ : Any = parquet_writer_kwargs def __UpperCamelCase ( self : List[Any] ) -> int: lowercase__ : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , "wb+" ) as buffer: lowercase__ : str = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs ) else: lowercase__ : Tuple = 
self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs ) return written def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : Optional[int] ) -> int: lowercase__ : List[Any] = 0 lowercase__ : List[Any] = parquet_writer_kwargs.pop("path_or_buf" , lowercase_ ) lowercase__ : List[Any] = self.dataset.features.arrow_schema lowercase__ : Optional[Any] = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_ ) for offset in logging.tqdm( range(0 , len(self.dataset ) , lowercase_ ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): lowercase__ : Any = query_table( table=self.dataset._data , key=slice(lowercase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowercase_ ) written += batch.nbytes writer.close() return written
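# A hedged sketch of the reader/writer round trip above, using only public
# datasets APIs: write a Dataset to Parquet, then load it back. The path is
# illustrative and a working `datasets` install is assumed.
from datasets import Dataset

def parquet_round_trip(path: str = "example.parquet") -> Dataset:
    ds = Dataset.from_dict({"id": [0, 1, 2], "text": ["a", "b", "c"]})
    ds.to_parquet(path)                # ParquetDatasetWriter under the hood
    return Dataset.from_parquet(path)  # ParquetDatasetReader under the hood

# assert parquet_round_trip()["text"] == ["a", "b", "c"]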
87
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict): # Initialise PyTorch model lowercase__ : List[str] = BertConfig.from_json_file(_lowerCamelCase) print(f'''Building PyTorch model from configuration: {config}''') lowercase__ : Optional[Any] = BertForPreTraining(_lowerCamelCase) # Load weights from tf checkpoint load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''') torch.save(model.state_dict() , _lowerCamelCase) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
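# The conversion script above follows a common three-step pattern: build the
# config, instantiate the PyTorch model, then load the TF weights into it. A
# hedged outline using the same transformers calls, with paths left as
# placeholders for the caller to supply.
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert

def convert(tf_ckpt: str, config_json: str, out_path: str) -> None:
    config = BertConfig.from_json_file(config_json)   # 1. architecture
    model = BertForPreTraining(config)                # 2. randomly-initialised model
    load_tf_weights_in_bert(model, config, tf_ckpt)   # 3. copy TF weights in place
    torch.save(model.state_dict(), out_path)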
87
1
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Returns the static shape where known, falling back to dynamic dims where it is not."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # A tiny constant keeps the op numerically stable on all backends.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
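# --- Editor's sketch: a quick eager-mode check of two helpers above (assumes the
# functions are importable from this module).
import tensorflow as tf

x = tf.zeros((2, 3, 4))
print(shape_list(x))                        # [2, 3, 4]
print(shape_list(flatten(x, start_dim=1)))  # [2, 12], mirroring torch.flatten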
33
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sorted-merge of the two adjacent runs input_list[low:mid] and input_list[mid:high + 1]."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort; returns a sorted copy of the input list."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
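# Editor's sketch: a quick non-interactive check of the implementation above.
print(iter_merge_sort([5, 9, 8, 7, 1, 2, 7]))  # [1, 2, 5, 7, 7, 8, 9]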
10
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") requires_backends(self , "vision") self.check_model_type(UpperCAmelCase_) def __call__(self : Optional[int] , UpperCAmelCase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase_ : Union[str, List[str]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]: '''simple docstring''' if "text_queries" in kwargs: lowerCamelCase__: Any =kwargs.pop("text_queries") if isinstance(UpperCAmelCase_ , (str, Image.Image)): lowerCamelCase__: List[Any] ={"image": image, "candidate_labels": candidate_labels} else: lowerCamelCase__: Any =image lowerCamelCase__: Dict =super().__call__(UpperCAmelCase_ , **UpperCAmelCase_) return results def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' lowerCamelCase__: List[str] ={} if "threshold" in kwargs: lowerCamelCase__: List[Any] =kwargs["threshold"] if "top_k" in kwargs: lowerCamelCase__: Any =kwargs["top_k"] return {}, {}, postprocess_params def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] =load_image(inputs["image"]) lowerCamelCase__: Dict =inputs["candidate_labels"] if isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Any =candidate_labels.split(",") lowerCamelCase__: Optional[int] =torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(UpperCAmelCase_): lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework) lowerCamelCase__: Union[str, Any] =self.image_processor(UpperCAmelCase_ , return_tensors=self.framework) yield { "is_last": i == len(UpperCAmelCase_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Dict =model_inputs.pop("target_size") lowerCamelCase__: Dict =model_inputs.pop("candidate_label") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) lowerCamelCase__: Dict ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=None) ->Tuple: '''simple docstring''' lowerCamelCase__: Union[str, Any] =[] for model_output in model_outputs: lowerCamelCase__: Optional[Any] =model_output["candidate_label"] lowerCamelCase__: Tuple =BaseModelOutput(UpperCAmelCase_) lowerCamelCase__: Dict =self.image_processor.post_process_object_detection( 
outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["target_size"])[0] for index in outputs["scores"].nonzero(): lowerCamelCase__: Dict =outputs["scores"][index].item() lowerCamelCase__: Dict =self._get_bounding_box(outputs["boxes"][index][0]) lowerCamelCase__: Optional[Any] ={"score": score, "label": label, "box": box} results.append(UpperCAmelCase_) lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_: x["score"] , reverse=UpperCAmelCase_) if top_k: lowerCamelCase__: Dict =results[:top_k] return results def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : "torch.Tensor") ->Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =box.int().tolist() lowerCamelCase__: Optional[int] ={ "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
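# --- Editor's usage sketch for the pipeline above, which corresponds to the
# zero-shot object detection task in transformers. Going through the high-level
# `pipeline` factory is an assumption of convenience here, and the OWL-ViT
# checkpoint name is the one commonly used for this task, not a requirement of
# the class itself.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
preds = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
print(preds[0])  # e.g. {'score': ..., 'label': 'cat', 'box': {'xmin': ..., ...}}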
10
1
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""", # See all DETR models at https://huggingface.co/models?filter=detr } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """detr""" __lowerCAmelCase = ["""past_key_values"""] __lowerCAmelCase = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : List[Any] , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=3 , lowerCamelCase_ : Any=100 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : Tuple=2048 , lowerCamelCase_ : List[Any]=8 , lowerCamelCase_ : Optional[int]=6 , lowerCamelCase_ : Tuple=2048 , lowerCamelCase_ : int=8 , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : Optional[Any]=0.0 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Union[str, Any]="relu" , lowerCamelCase_ : Tuple=256 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Any=1.0 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : str="sine" , lowerCamelCase_ : Union[str, Any]="resnet50" , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=False , lowerCamelCase_ : Union[str, Any]=1 , lowerCamelCase_ : List[str]=5 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : int=1 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : List[str]=0.1 , **lowerCamelCase_ : List[Any] , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) UpperCamelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCamelCase_ , lowerCamelCase_ ): UpperCamelCase = backbone_config.get("""model_type""" ) UpperCamelCase = CONFIG_MAPPING[backbone_model_type] UpperCamelCase = config_class.from_dict(lowerCamelCase_ ) # set timm attributes to None UpperCamelCase , UpperCamelCase , UpperCamelCase = None, None, None UpperCamelCase = use_timm_backbone UpperCamelCase = backbone_config UpperCamelCase = num_channels UpperCamelCase = num_queries UpperCamelCase = d_model UpperCamelCase = encoder_ffn_dim UpperCamelCase = encoder_layers UpperCamelCase = encoder_attention_heads UpperCamelCase = decoder_ffn_dim UpperCamelCase = decoder_layers UpperCamelCase = decoder_attention_heads UpperCamelCase = dropout UpperCamelCase = attention_dropout UpperCamelCase = activation_dropout UpperCamelCase = activation_function UpperCamelCase = init_std UpperCamelCase = init_xavier_std UpperCamelCase = encoder_layerdrop UpperCamelCase = decoder_layerdrop UpperCamelCase = encoder_layers UpperCamelCase = auxiliary_loss UpperCamelCase = position_embedding_type UpperCamelCase = backbone UpperCamelCase = use_pretrained_backbone UpperCamelCase = dilation # Hungarian matcher UpperCamelCase = class_cost UpperCamelCase = bbox_cost UpperCamelCase = giou_cost # Loss coefficients UpperCamelCase = mask_loss_coefficient UpperCamelCase = dice_loss_coefficient UpperCamelCase = bbox_loss_coefficient UpperCamelCase = giou_loss_coefficient UpperCamelCase = eos_coefficient super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ ) @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return self.encoder_attention_heads @property def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return self.d_model @classmethod def lowerCamelCase_ ( cls : Tuple , lowerCamelCase_ : PretrainedConfig , **lowerCamelCase_ : List[str] ): """simple docstring""" return cls(backbone_config=lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCamelCase = self.backbone_config.to_dict() UpperCamelCase = self.__class__.model_type return output class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = version.parse("""1.11""" ) @property def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return 1E-5 @property def lowerCamelCase_ ( self : Any ): """simple docstring""" return 12
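# --- Editor's sketch: constructing the DETR configuration defined above (exported
# in transformers as DetrConfig) and a randomly initialised model from it. The
# chosen hyperparameter values are arbitrary illustrations.
from transformers import DetrConfig, DetrModel

config = DetrConfig(num_queries=50, d_model=256)
model = DetrModel(config)
# attribute_map resolves the generic names to DETR-specific ones:
print(config.num_attention_heads, config.hidden_size)  # encoder_attention_heads, d_model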
165
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc subtending `angle` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
165
1
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _A : int = _symbol_database.Default() _A : Optional[int] = _descriptor_pool.Default().AddSerializedFile( b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03''' ) _A : Union[str, Any] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals) if _descriptor._USE_C_DESCRIPTORS is False: _A : List[str] = None _A : str = b'''H\003''' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _A : Optional[Any] = 45 _A : Optional[int] = 1581 _A : Optional[int] = 1517 _A : Dict = 1570 _A : List[Any] = 1584 _A : Union[str, Any] = 1793 _A : Any = 1795 _A : List[str] = 1916 _A : Dict = 1864 _A : str = 1905 _A : Optional[Any] = 1919 _A : List[str] = 2429 _A : int = 2208 _A : Dict = 2418 _A : str = 2323 _A : Dict = 2407 # @@protoc_insertion_point(module_scope)
229
def solution(limit: int = 28123) -> int:
    """
    Sum of all positive integers <= limit which cannot be written as the sum of
    two abundant numbers (Project Euler problem 23), using a divisor-sum sieve.
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
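# Editor's note: 12 is the smallest abundant number (1 + 2 + 3 + 4 + 6 = 16 > 12),
# so 24 = 12 + 12 is the smallest sum of two abundant numbers, and 28123 is the
# known bound above which every integer is such a sum - hence the default limit.
print(solution(limit=100))  # sum of all n <= 100 not expressible as a sum of two abundant numbers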
229
1
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
356
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = CTRLTokenizer _UpperCAmelCase :Any = False _UpperCAmelCase :List[Any] = False def _snake_case ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''} lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def _snake_case ( self , **_UpperCAmelCase ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Optional[int] = '''adapt react readapt apt''' return input_text, output_text def _snake_case ( self ): lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = tokens + [tokenizer.unk_token] lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
2
0
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal positional embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
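# Editor's sketch: a minimal shape check for the helper above (assumes jax is installed).
import jax.numpy as jnp

t = jnp.arange(4)
print(get_sinusoidal_embeddings(t, embedding_dim=8).shape)  # (4, 8)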
304
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin UpperCAmelCase : Optional[int] = random.Random() def __lowerCamelCase ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any]=1.0 , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Dict=None ): '''simple docstring''' if rng is None: lowerCamelCase = global_rng lowerCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , A , A=7 , A=4_00 , A=20_00 , A=1 , A=0.0 , A=1_60_00 , A=True , A=True , ) -> Optional[Any]: '''simple docstring''' lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = min_seq_length lowerCamelCase = max_seq_length lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCamelCase = feature_size lowerCamelCase = padding_value lowerCamelCase = sampling_rate lowerCamelCase = return_attention_mask lowerCamelCase = do_normalize def __A ( self ) -> Any: '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __A ( self , A=False , A=False ) -> Any: '''simple docstring''' def _flatten(A ): return list(itertools.chain(*A ) ) if equal_length: lowerCamelCase = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size lowerCamelCase = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCamelCase = [np.asarray(A ) for x in speech_inputs] return speech_inputs class __lowercase ( a_ , unittest.TestCase ): """simple docstring""" UpperCamelCase : Optional[int] = WavaVecaFeatureExtractor def __A ( self ) -> List[str]: '''simple docstring''' lowerCamelCase = WavaVecaFeatureExtractionTester(self ) def __A ( self , A ) -> Any: '''simple docstring''' self.assertTrue(np.all(np.mean(A , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1e-3 ) ) def __A ( self ) -> Dict: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = [np.asarray(A ) for speech_input in speech_inputs] # Test not batched input lowerCamelCase = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values lowerCamelCase = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(A , A , atol=1e-3 ) ) # Test batched lowerCamelCase = feat_extract(A , return_tensors="""np""" ).input_values lowerCamelCase = feat_extract(A , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(A , A ): self.assertTrue(np.allclose(A , A , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] lowerCamelCase = np.asarray(A ) lowerCamelCase = feat_extract(A , return_tensors="""np""" ).input_values lowerCamelCase = feat_extract(A , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(A , A ): self.assertTrue(np.allclose(A , A , atol=1e-3 ) ) def __A ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""] lowerCamelCase = [None, 16_00, None] for max_length, padding in zip(A , A ): lowerCamelCase = feat_extract(A , padding=A , max_length=A , return_tensors="""np""" ) lowerCamelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00] ) self.assertTrue(input_values[0][8_00:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:10_00] ) self.assertTrue(input_values[0][10_00:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:12_00] ) def __A ( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = range(8_00 , 14_00 , 2_00 ) lowerCamelCase = [floats_list((1, x) )[0] for x in lengths] lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""] lowerCamelCase = [None, 16_00, None] for max_length, padding in zip(A , A ): lowerCamelCase = feat_extract(A , max_length=A , padding=A ) lowerCamelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00] ) self._check_zero_mean_unit_variance(input_values[1][:10_00] ) self._check_zero_mean_unit_variance(input_values[2][:12_00] ) def __A ( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = feat_extract( A , truncation=A , max_length=10_00 , padding="""max_length""" , return_tensors="""np""" ) lowerCamelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def __A ( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = feat_extract( A , truncation=A , max_length=10_00 , padding="""longest""" , return_tensors="""np""" ) lowerCamelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1, :10_00] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 10_00) ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = feat_extract( A , truncation=A , max_length=20_00 , padding="""longest""" , return_tensors="""np""" ) lowerCamelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1, :10_00] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if 
max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 12_00) ) @require_torch def __A ( self ) -> Optional[int]: '''simple docstring''' import torch lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = np.random.rand(1_00 ).astype(np.floataa ) lowerCamelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCamelCase = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) lowerCamelCase = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def __A ( self ) -> str: '''simple docstring''' for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: lowerCamelCase = WavaVecaConfig.from_pretrained(A ) lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(A ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
252
0
def prefix_function(input_string: str) -> list:
    """
    Computes, for every index i, the length of the longest proper prefix of
    input_string[0...i] that is also a suffix of it (the KMP failure function).
    """
    # list for the result values
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Returns the length of the longest proper prefix that is also a suffix."""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
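# Editor's sketch: for "aabcdaabc" the final value is 4 because the 4-character
# prefix "aabc" is also a suffix of the string.
print(prefix_function("aabcdaabc"))  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(longest_prefix("aabcdaabc"))   # 4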
134
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
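# Editor's sketch: a minimal construction check of the config above. With the
# defaults, hop_length = 8 * 5 * 4 * 2 = 320, so frame_rate = ceil(24000 / 320) = 75
# and num_quantizers = (1000 * 3.0) // (75 * 10) = 4.
config = EncodecConfig(target_bandwidths=[1.5, 3.0])
print(config.frame_rate, config.num_quantizers)  # 75 4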
134
1
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel a_ :Optional[Any] = logging.getLogger(__name__) def lowercase_ (A : List[Any] , A : List[Any] ): # save results if os.path.exists(A ): if os.path.exists(os.path.join(A , 'config.json' ) ) and os.path.isfile( os.path.join(A , 'config.json' ) ): os.remove(os.path.join(A , 'config.json' ) ) if os.path.exists(os.path.join(A , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(A , 'pytorch_model.bin' ) ): os.remove(os.path.join(A , 'pytorch_model.bin' ) ) else: os.makedirs(A ) model.save_pretrained(A ) def lowercase_ (A : Any , A : Optional[Any]=False ): snake_case__ : str = 2 if unlogit: snake_case__ : Dict = torch.pow(A , A ) snake_case__ : Any = p * torch.log(A ) snake_case__ : Tuple = 0 return -plogp.sum(dim=-1 ) def lowercase_ (A : List[str] ): logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(A ) ) ) ) for row in range(len(A ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def lowercase_ (A : Tuple , A : Optional[Any] , A : str , A : int=True , A : Optional[int]=True , A : Any=None , A : int=False ): snake_case__ , snake_case__ : Optional[Any] = model.config.num_hidden_layers, model.config.num_attention_heads snake_case__ : int = torch.zeros(A , A ).to(args.device ) snake_case__ : Any = torch.zeros(A , A ).to(args.device ) if head_mask is None: snake_case__ : Dict = torch.ones(A , A ).to(args.device ) head_mask.requires_grad_(requires_grad=A ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: snake_case__ : Optional[int] = None snake_case__ : List[Any] = 0.0 snake_case__ : str = 0.0 for step, inputs in enumerate(tqdm(A , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): snake_case__ : Union[str, Any] = tuple(t.to(args.device ) for t in inputs ) ((snake_case__) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) snake_case__ : Union[str, Any] = model(A , labels=A , head_mask=A ) # (loss), lm_logits, presents, (all hidden_states), (attentions) snake_case__ , snake_case__ , snake_case__ : Dict = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(A ): snake_case__ : Optional[Any] = entropy(attn.detach() , A ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(A ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: snake_case__ : Union[str, Any] = 2 snake_case__ : List[Any] = torch.pow(torch.pow(A , A ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: snake_case__ : Tuple = (head_importance - head_importance.min()) / (head_importance.max() - 
head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(A ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(A ) logger.info('Head ranked by importance scores' ) snake_case__ : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) snake_case__ : Union[str, Any] = torch.arange( head_importance.numel() , device=args.device ) snake_case__ : str = head_ranks.view_as(A ) print_ad_tensor(A ) return attn_entropy, head_importance, total_loss def lowercase_ (A : Optional[int] , A : Dict , A : Optional[int] ): snake_case__ , snake_case__ , snake_case__ : Any = compute_heads_importance(A , A , A , compute_entropy=A ) snake_case__ : Tuple = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , A , original_score * args.masking_threshold ) snake_case__ : Optional[Any] = torch.ones_like(A ) snake_case__ : Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) snake_case__ : Dict = original_score while current_score >= original_score * args.masking_threshold: snake_case__ : int = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads snake_case__ : List[Any] = float('Inf' ) snake_case__ : Union[str, Any] = head_importance.view(-1 ).sort()[1] if len(A ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads snake_case__ : int = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) snake_case__ : int = new_head_mask.view(-1 ) snake_case__ : int = 0.0 snake_case__ : Union[str, Any] = new_head_mask.view_as(A ) snake_case__ : List[str] = new_head_mask.clone().detach() print_ad_tensor(A ) # Compute metric and head importance again snake_case__ , snake_case__ , snake_case__ : Any = compute_heads_importance( A , A , A , compute_entropy=A , head_mask=A ) snake_case__ : Dict = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , ) logger.info('Final head mask' ) print_ad_tensor(A ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def lowercase_ (A : List[str] , A : Tuple , A : Optional[Any] , A : int ): snake_case__ : Any = datetime.now() snake_case__ , snake_case__ , snake_case__ : str = compute_heads_importance( A , A , A , compute_entropy=A , compute_importance=A , head_mask=A ) snake_case__ : Tuple = 1 / loss snake_case__ : Dict = datetime.now() - before_time snake_case__ : Union[str, Any] = sum(p.numel() for p in model.parameters() ) snake_case__ : Optional[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A ) ) } for k, v in heads_to_prune.items(): if isinstance(A , A ): snake_case__ : Any = [ v, ] assert sum(len(A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(A ) snake_case__ : Dict = sum(p.numel() for p in model.parameters() ) snake_case__ : Tuple = datetime.now() snake_case__ , snake_case__ , snake_case__ : Dict = compute_heads_importance( A , A , A , compute_entropy=A , compute_importance=A , head_mask=A , actually_pruned=A , ) snake_case__ : Any = 1 / loss snake_case__ : int = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , A , A , 
pruned_num_params / original_num_params * 1_0_0 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , A , A ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 ) save_model(A , args.output_dir ) def lowercase_ (): snake_case__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=A , type=A , required=A , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=A , type=A , required=A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=A , type=A , required=A , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=A , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=A , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=A , type=A , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=A , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=A , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=A , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=A , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=1_2_8 , type=A , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=A , help='Batch size.' ) parser.add_argument('--seed' , type=A , default=4_2 ) parser.add_argument('--local_rank' , type=A , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=A , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=A , default='' , help='Can be used for distant debugging.' 
) snake_case__ : Optional[int] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: snake_case__ : List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) snake_case__ : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) snake_case__ : int = torch.device('cuda' , args.local_rank ) snake_case__ : List[str] = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) snake_case__ : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: snake_case__ : List[str] = nn.parallel.DistributedDataParallel( A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A ) elif args.n_gpu > 1: snake_case__ : Optional[int] = nn.DataParallel(A ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=A ) torch.save(A , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , A ) # Prepare dataset snake_case__ : Optional[Any] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) snake_case__ : List[str] = (torch.from_numpy(A ),) snake_case__ : int = TensorDataset(*A ) snake_case__ : Union[str, Any] = RandomSampler(A ) snake_case__ : Any = DataLoader(A , sampler=A , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(A , A , A ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: snake_case__ : Dict = mask_heads(A , A , A ) prune_heads(A , A , A , A ) if __name__ == "__main__": main()
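# Editor's note: a hypothetical invocation of the head-importance/pruning script
# above. The script file name and all paths below are placeholders; the only
# grounded detail is that --data_dir is read with np.loadtxt, i.e. it should
# point at a text file of token ids.
#
#   python prune_gpt2_heads.py \
#       --data_dir ./tokenized_ids.txt \
#       --model_name_or_path gpt2 \
#       --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9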
277
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
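# Editor's sketch: attribute_map lets generic configuration names resolve to the
# GPT-specific ones defined above.
config = OpenAIGPTConfig(n_layer=6)
print(config.num_hidden_layers)  # 6, via the "num_hidden_layers" -> "n_layer" mapping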
277
1
"""simple docstring""" import os from datetime import datetime as dt from github import Github UpperCamelCase_ =[ """good first issue""", """good second issue""", """good difficult issue""", """enhancement""", """new pipeline/model""", """new scheduler""", """wip""", ] def a_ ( ): _UpperCamelCase : str = Github(os.environ['''GITHUB_TOKEN'''] ) _UpperCamelCase : List[Any] = g.get_repo('''huggingface/diffusers''' ) _UpperCamelCase : Any = repo.get_issues(state='''open''' ) for issue in open_issues: _UpperCamelCase : Optional[int] = sorted(issue.get_comments() , key=lambda _lowercase : i.created_at , reverse=_lowercase ) _UpperCamelCase : Optional[Any] = comments[0] if len(_lowercase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='''closed''' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='''open''' ) issue.remove_from_labels('''stale''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) issue.add_to_labels('''stale''' ) if __name__ == "__main__": main()
128
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def a_ ( _lowercase , _lowercase , _lowercase ): # Initialise PyTorch model _UpperCamelCase : List[Any] = MobileBertConfig.from_json_file(_lowercase ) print(F"""Building PyTorch model from configuration: {config}""" ) _UpperCamelCase : List[str] = MobileBertForPreTraining(_lowercase ) # Load weights from tf checkpoint _UpperCamelCase : Union[str, Any] = load_tf_weights_in_mobilebert(_lowercase , _lowercase , _lowercase ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , _lowercase ) if __name__ == "__main__": UpperCamelCase_ =argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--mobilebert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained MobileBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCamelCase_ =parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
128
1
def is_sum_subset(arr, required_sum) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
124
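# Hand-checked examples for the subset-sum routine above; the `is_sum_subset`
# name comes from the corrected version of that sample, and the input values
# are arbitrary illustrations.
assert not is_sum_subset([2, 4, 6, 8], 5)  # every element is even, so an odd sum is unreachable
assert is_sum_subset([2, 4, 6, 8], 14)     # 2 + 4 + 8 == 14
assert is_sum_subset([], 0)                # the empty subset sums to 0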
from __future__ import annotations from collections.abc import Callable __UpperCAmelCase = list[list[float | int]] def A__ ( __lowerCamelCase, __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE_ = [[0 for _ in range(size + 1 )] for _ in range(__lowerCamelCase )] SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 for row in range(__lowerCamelCase ): for col in range(__lowerCamelCase ): SCREAMING_SNAKE_CASE_ = matrix[row][col] SCREAMING_SNAKE_CASE_ = vector[row][0] SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 while row < size and col < size: # pivoting SCREAMING_SNAKE_CASE_ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCamelCase, __lowerCamelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = augmented[pivot_row], augmented[row] for rowa in range(row + 1, __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = augmented[rowa][col] / augmented[row][col] SCREAMING_SNAKE_CASE_ = 0 for cola in range(col + 1, size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1, __lowerCamelCase ): for row in range(__lowerCamelCase ): SCREAMING_SNAKE_CASE_ = augmented[row][col] / augmented[col][col] for cola in range(__lowerCamelCase, size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row], 10 )] for row in range(__lowerCamelCase ) ] def A__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE_ = [[0 for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )] SCREAMING_SNAKE_CASE_ = [[0] for _ in range(__lowerCamelCase )] SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 for x_val, y_val in enumerate(__lowerCamelCase ): for col in range(__lowerCamelCase ): SCREAMING_SNAKE_CASE_ = (x_val + 1) ** (size - col - 1) SCREAMING_SNAKE_CASE_ = y_val SCREAMING_SNAKE_CASE_ = solve(__lowerCamelCase, __lowerCamelCase ) def interpolated_func(__lowerCamelCase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__lowerCamelCase ) ) return interpolated_func def A__ ( __lowerCamelCase ): return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def A__ ( __lowerCamelCase = question_function, __lowerCamelCase = 10 ): SCREAMING_SNAKE_CASE_ = [func(__lowerCamelCase ) for x_val in range(1, order + 1 )] SCREAMING_SNAKE_CASE_ = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 ) ] SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 for poly in polynomials: SCREAMING_SNAKE_CASE_ = 1 while func(__lowerCamelCase ) == poly(__lowerCamelCase ): x_val += 1 ret += poly(__lowerCamelCase ) return ret if __name__ == "__main__": print(F"""{solution() = }""")
299
0
"""simple docstring""" from __future__ import annotations __A : List[str] = tuple[int, int, int] __A : Dict = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase __A : Any = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # -------------------------- default selection -------------------------- # rotors -------------------------- __A : List[Any] = 'EGZWVONAHDCLFQMSIPJBYUKXTR' __A : List[str] = 'FOBHMDKEXQNRAULPGSJVTYICZW' __A : Union[str, Any] = 'ZJXESIUQLHAVRMDOYGTNFWPBKC' # reflector -------------------------- __A : Optional[Any] = { 'A': 'N', 'N': 'A', 'B': 'O', 'O': 'B', 'C': 'P', 'P': 'C', 'D': 'Q', 'Q': 'D', 'E': 'R', 'R': 'E', 'F': 'S', 'S': 'F', 'G': 'T', 'T': 'G', 'H': 'U', 'U': 'H', 'I': 'V', 'V': 'I', 'J': 'W', 'W': 'J', 'K': 'X', 'X': 'K', 'L': 'Y', 'Y': 'L', 'M': 'Z', 'Z': 'M', } # -------------------------- extra rotors -------------------------- __A : int = 'RMDJXFUWGISLHVTCQNKYPBEZOA' __A : Dict = 'SGLCPQWZHKXAREONTFBVIYJUDM' __A : Optional[Any] = 'HVSICLTYKQUBXDWAJZOMFGPREN' __A : Optional[int] = 'RZWQHFMVDBKICJLNTUXAGYPSOE' __A : str = 'LFKIJODBEGAMQPXVUHYSTCZRWN' __A : Union[str, Any] = 'KOAEGVDHXPQZMLFTYWJNBRCIUS' def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ): """simple docstring""" # Checks if there are 3 unique rotors if (unique_rotsel := len(set(lowercase__ ) )) < 3: A = F"""Please use 3 unique rotors (not {unique_rotsel})""" raise Exception(lowercase__ ) # Checks if rotor positions are valid A , A , A = rotpos if not 0 < rotorposa <= len(lowercase__ ): A = F"""First rotor position is not within range of 1..26 ({rotorposa}""" raise ValueError(lowercase__ ) if not 0 < rotorposa <= len(lowercase__ ): A = F"""Second rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(lowercase__ ) if not 0 < rotorposa <= len(lowercase__ ): A = F"""Third rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(lowercase__ ) # Validates string and returns dict A = _plugboard(lowercase__ ) return rotpos, rotsel, pbdict def __SCREAMING_SNAKE_CASE ( lowercase__ ): """simple docstring""" # tests the input string if it # a) is type string # b) has even length (so pairs can be made) if not isinstance(lowercase__ , lowercase__ ): A = F"""Plugboard setting isn't type string ({type(lowercase__ )})""" raise TypeError(lowercase__ ) elif len(lowercase__ ) % 2 != 0: A = F"""Odd number of symbols ({len(lowercase__ )})""" raise Exception(lowercase__ ) elif pbstring == "": return {} pbstring.replace(" " , "" ) # Checks if all characters are unique A = set() for i in pbstring: if i not in abc: A = F"""'{i}' not in list of symbols""" raise Exception(lowercase__ ) elif i in tmppbl: A = F"""Duplicate symbol ({i})""" raise Exception(lowercase__ ) else: tmppbl.add(lowercase__ ) del tmppbl # Created the dictionary A = {} for j in range(0 , len(lowercase__ ) - 1 , 2 ): A = pbstring[j + 1] A = pbstring[j] return pb def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ = (rotora, rotora, rotora) , lowercase__ = "" , ): """simple docstring""" A = text.upper() A , A , A = _validator( lowercase__ , lowercase__ , plugb.upper() ) A , A , A = rotor_position A , A , A = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 A = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: A = plugboard[symbol] # rotor ra -------------------------- A = abc.index(lowercase__ ) + rotorposa A = rotora[index 
% len(lowercase__ )] # rotor rb -------------------------- A = abc.index(lowercase__ ) + rotorposa A = rotora[index % len(lowercase__ )] # rotor rc -------------------------- A = abc.index(lowercase__ ) + rotorposa A = rotora[index % len(lowercase__ )] # reflector -------------------------- # this is the reason you don't need another machine to decipher A = reflector[symbol] # 2nd rotors A = abc[rotora.index(lowercase__ ) - rotorposa] A = abc[rotora.index(lowercase__ ) - rotorposa] A = abc[rotora.index(lowercase__ ) - rotorposa] # 2nd plugboard if symbol in plugboard: A = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(lowercase__ ): A = 0 rotorposa += 1 if rotorposa >= len(lowercase__ ): A = 0 rotorposa += 1 if rotorposa >= len(lowercase__ ): A = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(lowercase__ ) return "".join(lowercase__ ) if __name__ == "__main__": __A : Any = 'This is my Python script that emulates the Enigma machine from WWII.' __A : str = (1, 1, 1) __A : str = 'pictures' __A : Tuple = (rotora, rotora, rotora) __A : Any = enigma(message, rotor_pos, rotor_sel, pb) print('Encrypted message:', en) print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
57
"""simple docstring""" from __future__ import annotations class __UpperCamelCase : def __init__(self : Tuple , __SCREAMING_SNAKE_CASE : int = 0): A = key def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int): assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) A = key or self.__key or 1 # make sure key is an appropriate size key %= 2_5_5 return [chr(ord(__SCREAMING_SNAKE_CASE) ^ key) for ch in content] def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int): assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) A = key or self.__key or 1 # make sure key is an appropriate size key %= 2_5_5 return [chr(ord(__SCREAMING_SNAKE_CASE) ^ key) for ch in content] def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 0): assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) A = key or self.__key or 1 # make sure key can be any size while key > 2_5_5: key -= 2_5_5 # This will be returned A = "" for ch in content: ans += chr(ord(__SCREAMING_SNAKE_CASE) ^ key) return ans def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 0): assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) A = key or self.__key or 1 # make sure key can be any size while key > 2_5_5: key -= 2_5_5 # This will be returned A = "" for ch in content: ans += chr(ord(__SCREAMING_SNAKE_CASE) ^ key) return ans def SCREAMING_SNAKE_CASE__ (self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 0): assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) try: with open(__SCREAMING_SNAKE_CASE) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) except OSError: return False return True def SCREAMING_SNAKE_CASE__ (self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int): assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) try: with open(__SCREAMING_SNAKE_CASE) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
57
1
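# Minimal round-trip sketch for the XORCipher class above (the key and
# message values are arbitrary illustrations):
crypt = XORCipher()
ciphertext = crypt.encrypt_string("hallo welt", 67)
assert crypt.decrypt_string(ciphertext, 67) == "hallo welt"  # XOR-ing twice with the same key restores the input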
def palindromic_string(input_string: str) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
5
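# Illustrative calls for the Manacher routine above, using the
# `palindromic_string` name from the corrected sample:
assert palindromic_string("abbbaba") == "abbba"  # longest palindromic substring
assert palindromic_string("ababa") == "ababa"    # the whole input is already a palindrome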
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
0
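# Round-trip example for the letter-position helpers above
# ('h' is the 8th letter, 'e' the 5th, 'l' the 12th, 'o' the 15th):
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"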
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" @property def _a ( self ): torch.manual_seed(0 ) UpperCamelCase_: Dict = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def _a ( self ): UpperCamelCase_: Tuple = self.dummy_uncond_unet UpperCamelCase_: int = PNDMScheduler() UpperCamelCase_: Any = PNDMPipeline(unet=a_ , scheduler=a_ ) pndm.to(a_ ) pndm.set_progress_bar_config(disable=a_ ) UpperCamelCase_: Union[str, Any] = torch.manual_seed(0 ) UpperCamelCase_: Dict = pndm(generator=a_ , num_inference_steps=2_0 , output_type='numpy' ).images UpperCamelCase_: Optional[int] = torch.manual_seed(0 ) UpperCamelCase_: Dict = pndm(generator=a_ , num_inference_steps=2_0 , output_type='numpy' , return_dict=a_ )[0] UpperCamelCase_: str = image[0, -3:, -3:, -1] UpperCamelCase_: List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) UpperCamelCase_: List[Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" def _a ( self ): UpperCamelCase_: List[Any] = 'google/ddpm-cifar10-32' UpperCamelCase_: List[str] = UNetaDModel.from_pretrained(a_ ) UpperCamelCase_: Tuple = PNDMScheduler() UpperCamelCase_: List[str] = PNDMPipeline(unet=a_ , scheduler=a_ ) pndm.to(a_ ) pndm.set_progress_bar_config(disable=a_ ) UpperCamelCase_: Union[str, Any] = torch.manual_seed(0 ) UpperCamelCase_: Any = pndm(generator=a_ , output_type='numpy' ).images UpperCamelCase_: Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) UpperCamelCase_: int = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
359
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
292
0
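# Both variants above implement the identity gcd(a, b) == gcd(b, a % b);
# a quick agreement check on arbitrary inputs:
assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6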
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path __A : int = '''src/transformers''' # Matches is_xxx_available() __A : Optional[int] = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} __A : Union[str, Any] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : int = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available __A : Optional[int] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") __A : List[Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : Optional[int] = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", __A : Optional[int] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], __A : Union[str, Any] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo __A : Union[str, Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: __A : Union[str, Any] = re.compile(R'''^\s*try:''') # Catches a line with else: __A : str = re.compile(R'''^\s*else:''') def lowercase ( __snake_case : int ): if _re_test_backend.search(__snake_case ) is None: return None lowercase_ : Dict = [b[0] for b in _re_backend.findall(__snake_case )] backends.sort() return "_and_".join(__snake_case ) def lowercase ( __snake_case : List[Any] ): with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowercase_ : str = f.readlines() lowercase_ : str = 0 while line_index < len(__snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__snake_case ): return None # First grab the objects without a specific backend in _import_structure lowercase_ : Union[str, Any] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowercase_ : List[Any] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__snake_case ): lowercase_ : Union[str, Any] = _re_one_line_import_struct.search(__snake_case ).groups()[0] lowercase_ : int = re.findall('''\[([^\]]+)\]''' , __snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowercase_ : Union[str, Any] = _re_import_struct_key_value.search(__snake_case ) if single_line_import_search is not None: lowercase_ : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__snake_case ) > 0] objects.extend(__snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowercase_ : Optional[int] = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowercase_ : Any = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase_ : Any = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase_ : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowercase_ : Optional[int] = lines[line_index] if _re_import_struct_add_one.search(__snake_case ) is not None: objects.append(_re_import_struct_add_one.search(__snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(__snake_case ) is not None: lowercase_ : int = _re_import_struct_add_many.search(__snake_case ).groups()[0].split(''', ''' ) lowercase_ : Optional[int] = [obj[1:-1] for obj in imports if len(__snake_case ) > 0] objects.extend(__snake_case ) elif _re_between_brackets.search(__snake_case ) is not None: lowercase_ : List[Any] = _re_between_brackets.search(__snake_case ).groups()[0].split(''', ''' ) lowercase_ : List[Any] = [obj[1:-1] for obj in imports if len(__snake_case ) > 0] objects.extend(__snake_case ) elif _re_quote_object.search(__snake_case ) is not None: objects.append(_re_quote_object.search(__snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 1_2 + '''"''' ): objects.append(line[1_3:-3] ) line_index += 1 lowercase_ : str = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowercase_ : Union[str, Any] = [] while ( line_index < len(__snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowercase_ : str = lines[line_index] lowercase_ : int = _re_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowercase_ : List[Any] = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(__snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
lowercase_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase_ : List[str] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowercase_ : Union[str, Any] = lines[line_index] lowercase_ : List[Any] = _re_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 lowercase_ : Optional[int] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def lowercase ( __snake_case : Any , __snake_case : List[str] ): def find_duplicates(__snake_case : Tuple ): return [k for k, v in collections.Counter(__snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowercase_ : List[str] = [] for key in import_dict_objects.keys(): lowercase_ : str = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowercase_ : Optional[int] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowercase_ : str = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def lowercase ( ): lowercase_ : Optional[int] = [] for root, _, files in os.walk(__snake_case ): if "__init__.py" in files: lowercase_ : List[str] = os.path.join(__snake_case , '''__init__.py''' ) lowercase_ : Tuple = parse_init(__snake_case ) if objects is not None: lowercase_ : Optional[int] = analyze_results(*__snake_case ) if len(__snake_case ) > 0: lowercase_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(__snake_case ) ) if len(__snake_case ) > 0: raise ValueError('''\n\n'''.join(__snake_case ) ) def lowercase ( ): lowercase_ : List[Any] = [] for path, directories, files in os.walk(__snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(__snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue lowercase_ : List[str] = str((Path(__snake_case ) / folder).relative_to(__snake_case ) ) lowercase_ : int = short_path.replace(os.path.sep , '''.''' ) submodules.append(__snake_case ) for fname in files: if fname == "__init__.py": continue lowercase_ : Optional[Any] = str((Path(__snake_case ) / fname).relative_to(__snake_case ) ) lowercase_ : Optional[int] = short_path.replace('''.py''' , 
'''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(__snake_case ) return submodules __A : List[Any] = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def lowercase ( ): # This is to make sure the transformers module imported is the one in the repo. lowercase_ : Optional[int] = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(__snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowercase_ : Union[str, Any] = spec.loader.load_module() lowercase_ : Optional[int] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(__snake_case ) > 0: lowercase_ : str = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
33
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __A : Union[str, Any] = logging.get_logger(__name__) # General docstring __A : Tuple = '''MobileNetV1Config''' # Base docstring __A : Union[str, Any] = '''google/mobilenet_v1_1.0_224''' __A : Union[str, Any] = [1, 1_024, 7, 7] # Image classification docstring __A : Optional[Any] = '''google/mobilenet_v1_1.0_224''' __A : List[Any] = '''tabby, tabby cat''' __A : Union[str, Any] = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowercase ( __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Dict=None ): lowercase_ : str = {} if isinstance(__snake_case , __snake_case ): lowercase_ : Union[str, Any] = model.mobilenet_va else: lowercase_ : Optional[Any] = model lowercase_ : Union[str, Any] = '''MobilenetV1/Conv2d_0/''' lowercase_ : Union[str, Any] = backbone.conv_stem.convolution.weight lowercase_ : Optional[Any] = backbone.conv_stem.normalization.bias lowercase_ : Union[str, Any] = backbone.conv_stem.normalization.weight lowercase_ : Any = backbone.conv_stem.normalization.running_mean lowercase_ : int = backbone.conv_stem.normalization.running_var for i in range(1_3 ): lowercase_ : Optional[int] = i + 1 lowercase_ : Union[str, Any] = i * 2 lowercase_ : Optional[Any] = backbone.layer[pt_index] lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' lowercase_ : str = pointer.convolution.weight lowercase_ : int = pointer.normalization.bias lowercase_ : Any = pointer.normalization.weight lowercase_ : Dict = pointer.normalization.running_mean lowercase_ : Union[str, Any] = pointer.normalization.running_var lowercase_ : Any = backbone.layer[pt_index + 1] lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' lowercase_ : int = pointer.convolution.weight lowercase_ : str = pointer.normalization.bias lowercase_ : Tuple = pointer.normalization.weight lowercase_ : Dict = pointer.normalization.running_mean lowercase_ : Any = pointer.normalization.running_var if isinstance(__snake_case , __snake_case ): lowercase_ : Optional[Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/''' lowercase_ : Any = model.classifier.weight lowercase_ : Optional[int] = model.classifier.bias return tf_to_pt_map def lowercase ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see ''' '''https://www.tensorflow.org/install/ for installation instructions.''' ) raise # Load weights from TF model lowercase_ : Tuple = tf.train.list_variables(__snake_case ) lowercase_ : int = {} for name, shape in init_vars: logger.info(F'''Loading TF weight {name} with shape {shape}''' ) lowercase_ : Optional[Any] = tf.train.load_variable(__snake_case , __snake_case ) lowercase_ : Optional[int] = array # Build TF to PyTorch weights loading map lowercase_ : Any = _build_tf_to_pytorch_map(__snake_case , __snake_case , __snake_case ) for name, pointer in tf_to_pt_map.items(): logger.info(F'''Importing {name}''' ) if name not in tf_weights: logger.info(F'''{name} not in tf pre-trained weights, skipping''' ) continue lowercase_ : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('''Transposing depthwise''' ) lowercase_ : Any = np.transpose(__snake_case , (2, 3, 0, 1) ) elif "weights" in name: logger.info('''Transposing''' ) if len(pointer.shape ) == 2: # copying into linear layer lowercase_ : Optional[int] = array.squeeze().transpose() else: lowercase_ : Optional[int] = np.transpose(__snake_case , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' ) lowercase_ : str = torch.from_numpy(__snake_case ) tf_weights.pop(__snake_case , __snake_case ) tf_weights.pop(name + '''/RMSProp''' , __snake_case ) tf_weights.pop(name + '''/RMSProp_1''' , __snake_case ) tf_weights.pop(name + '''/ExponentialMovingAverage''' , __snake_case ) logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' ) return model def lowercase ( __snake_case : torch.Tensor , __snake_case : nn.Convad ): lowercase_ , lowercase_ : Optional[int] = features.shape[-2:] lowercase_ , lowercase_ : str = conv_layer.stride lowercase_ , lowercase_ : Tuple = conv_layer.kernel_size if in_height % stride_height == 0: lowercase_ : Dict = max(kernel_height - stride_height , 0 ) else: lowercase_ : List[Any] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: lowercase_ : str = max(kernel_width - stride_width , 0 ) else: lowercase_ : int = max(kernel_width - (in_width % stride_width) , 0 ) lowercase_ : int = pad_along_width // 2 lowercase_ : Union[str, Any] = pad_along_width - pad_left lowercase_ : Tuple = pad_along_height // 2 lowercase_ : List[str] = pad_along_height - pad_top lowercase_ : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__snake_case , __snake_case , '''constant''' , 0.0 ) class _UpperCAmelCase ( nn.Module ): def __init__( self : List[Any] , A : MobileNetVaConfig , A : int , A : int , A : int , A : Optional[int] = 1 , A : Optional[int] = 1 , A : bool = False , A : Optional[bool] = True , A : Optional[bool or str] = True , ) -> None: super().__init__() lowercase_ : int = config if in_channels % groups != 0: raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) lowercase_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) lowercase_ : int = nn.Convad( in_channels=A , out_channels=A , kernel_size=A , stride=A , padding=A , groups=A , bias=A , padding_mode='''zeros''' , ) if use_normalization: lowercase_ : Optional[Any] = nn.BatchNormad( num_features=A , 
eps=config.layer_norm_eps , momentum=0.9997 , affine=A , track_running_stats=A , ) else: lowercase_ : Union[str, Any] = None if use_activation: if isinstance(A , A ): lowercase_ : str = ACTaFN[use_activation] elif isinstance(config.hidden_act , A ): lowercase_ : Any = ACTaFN[config.hidden_act] else: lowercase_ : Tuple = config.hidden_act else: lowercase_ : Tuple = None def A ( self : str , A : torch.Tensor ) -> torch.Tensor: if self.config.tf_padding: lowercase_ : List[Any] = apply_tf_padding(A , self.convolution ) lowercase_ : Optional[int] = self.convolution(A ) if self.normalization is not None: lowercase_ : Union[str, Any] = self.normalization(A ) if self.activation is not None: lowercase_ : Optional[int] = self.activation(A ) return features class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : Optional[int] = MobileNetVaConfig SCREAMING_SNAKE_CASE_ : int = load_tf_weights_in_mobilenet_va SCREAMING_SNAKE_CASE_ : Optional[Any] = "mobilenet_v1" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "pixel_values" SCREAMING_SNAKE_CASE_ : List[str] = False def A ( self : Any , A : Union[nn.Linear, nn.Convad] ) -> None: if isinstance(A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(A , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __A : Union[str, Any] = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __A : List[str] = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, _A , ) class _UpperCAmelCase ( _A ): def __init__( self : str , A : MobileNetVaConfig , A : bool = True ) -> int: super().__init__(A ) lowercase_ : Union[str, Any] = config lowercase_ : List[str] = 32 lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth ) lowercase_ : Union[str, Any] = MobileNetVaConvLayer( A , in_channels=config.num_channels , out_channels=A , kernel_size=3 , stride=2 , ) lowercase_ : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] lowercase_ : List[Any] = nn.ModuleList() for i in range(13 ): lowercase_ : Dict = out_channels if strides[i] == 2 or i == 0: depth *= 2 lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( A , in_channels=A , out_channels=A , kernel_size=3 , stride=strides[i] , groups=A , ) ) self.layer.append( MobileNetVaConvLayer( A , in_channels=A , out_channels=A , kernel_size=1 , ) ) lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def A ( self : Any , A : Optional[Any] ) -> Optional[int]: raise NotImplementedError @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def A ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: lowercase_ : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) lowercase_ : List[str] = self.conv_stem(A ) lowercase_ : Dict = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): lowercase_ : Optional[int] = layer_module(A ) if output_hidden_states: lowercase_ : str = all_hidden_states + (hidden_states,) lowercase_ : Tuple = hidden_states if self.pooler is not None: lowercase_ : Dict = torch.flatten(self.pooler(A ) , start_dim=1 ) else: lowercase_ : Optional[Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A , pooler_output=A , hidden_states=A , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , _A , ) class _UpperCAmelCase ( _A ): def __init__( self : List[str] , A : MobileNetVaConfig ) -> None: super().__init__(A ) lowercase_ : int = config.num_labels lowercase_ : List[str] = MobileNetVaModel(A ) lowercase_ : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head lowercase_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=A ) lowercase_ : int = nn.Linear(A , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def A ( self : Optional[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ : List[Any] = self.mobilenet_va(A , output_hidden_states=A , return_dict=A ) lowercase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] lowercase_ : Dict = self.classifier(self.dropout(A ) ) lowercase_ : int = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase_ : List[str] = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase_ : Optional[Any] = '''single_label_classification''' else: lowercase_ : Tuple = '''multi_label_classification''' if self.config.problem_type == "regression": lowercase_ : str = MSELoss() if self.num_labels == 1: lowercase_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase_ : List[str] = loss_fct(A , A ) elif self.config.problem_type == "single_label_classification": lowercase_ : List[Any] = CrossEntropyLoss() lowercase_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase_ : str = BCEWithLogitsLoss() lowercase_ : List[Any] = loss_fct(A , A ) if not return_dict: lowercase_ : Tuple = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=A , logits=A , hidden_states=outputs.hidden_states , )
33
1
'''simple docstring'''
import string


def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
352
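# Offline example for the brute-force Caesar decryption above: the input is
# ROT-13 of a known phrase, so the printed line for Key #13 reads
# "THIS IS MY SECRET MESSAGE".
decrypt("GUVF VF ZL FRPERG ZRFFNTR")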
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : int = {'vocab_file': 'spm_char.model'} UpperCamelCase__ : Optional[Any] = { 'vocab_file': { 'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model', 'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model', 'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model', } } UpperCamelCase__ : Union[str, Any] = { 'microsoft/speecht5_asr': 1_024, 'microsoft/speecht5_tts': 1_024, 'microsoft/speecht5_vc': 1_024, } class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase = None , **_lowerCamelCase , ) -> None: A_ : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : List[Any] = vocab_file A_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCamelCase ) @property def UpperCAmelCase_ ( self ) -> Any: return self.sp_model.get_piece_size() def UpperCAmelCase_ ( self ) -> int: A_ : Dict = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> str: A_ : Optional[int] = self.__dict__.copy() A_ : str = None return state def __setstate__( self , _lowerCamelCase ) -> List[str]: A_ : int = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A_ : Union[str, Any] = {} A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]: return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]: return self.sp_model.piece_to_id(_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]: A_ : Dict = self.sp_model.IdToPiece(_lowerCamelCase ) return token def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Union[str, Any]: A_ : Tuple = [] A_ : Union[str, Any] = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCamelCase ) + token A_ : Optional[int] = [] else: current_sub_tokens.append(_lowerCamelCase ) out_string += self.sp_model.decode(_lowerCamelCase ) return out_string.strip() def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) -> List[int]: if 
already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Union[str, Any] = [1] if token_ids_a is None: return ([0] * len(_lowerCamelCase )) + suffix_ones return ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCamelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return A_ : Optional[int] = os.path.join( _lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , """wb""" ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
164
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_pad def _A ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _A ( self , _A , _A=False ): '''simple docstring''' if not batched: __SCREAMING_SNAKE_CASE = image_inputs[0] if isinstance(_A , Image.Image ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.size else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2] if w < h: __SCREAMING_SNAKE_CASE = int(self.size['shortest_edge'] * h / w ) __SCREAMING_SNAKE_CASE = self.size['shortest_edge'] elif w > h: __SCREAMING_SNAKE_CASE = self.size['shortest_edge'] __SCREAMING_SNAKE_CASE = int(self.size['shortest_edge'] * w / h ) else: __SCREAMING_SNAKE_CASE = self.size['shortest_edge'] __SCREAMING_SNAKE_CASE = self.size['shortest_edge'] else: __SCREAMING_SNAKE_CASE = [] for image in image_inputs: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __SCREAMING_SNAKE_CASE = max(_A , key=lambda _A : item[0] )[0] __SCREAMING_SNAKE_CASE = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase__ : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessingTester(self ) @property def _A ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'image_mean' ) ) self.assertTrue(hasattr(_A , 'image_std' ) ) self.assertTrue(hasattr(_A , 'do_normalize' ) ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , 
{'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _A ) def _A ( self ): '''simple docstring''' pass def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_A , batched=_A ) __SCREAMING_SNAKE_CASE = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(_A , return_tensors='pt' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(_A , return_tensors='pt' ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, 
expected_width, ) , ) @slow def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {'image_id': 39_769, 'annotations': target} # encode them __SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' ) __SCREAMING_SNAKE_CASE = image_processing(images=_A , annotations=_A , return_tensors='pt' ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape , _A ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _A ) __SCREAMING_SNAKE_CASE = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) ) @slow def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} __SCREAMING_SNAKE_CASE = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them __SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format='coco_panoptic' ) __SCREAMING_SNAKE_CASE = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='pt' ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape , _A ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _A ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 
0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) ) # verify masks __SCREAMING_SNAKE_CASE = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _A ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
257
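The test above exercises shortest-edge resizing in a DETR-family image processor. A minimal usage sketch of that preprocessing step, assuming the transformers and Pillow packages and network access to the checkpoint (the blank image is a stand-in for a real photo):

from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.new("RGB", (640, 480))  # stand-in for a real 640x480 photo
encoding = processor(images=image, return_tensors="pt")
# with the default size the shortest edge is scaled to 800, so 640x480 -> 800x1066
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])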
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
257
1
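run_eval.py forwards any flags its own parser does not recognize (e.g. --num_beams=2) straight to model.generate. A self-contained sketch of that passthrough pattern; the parsing helper below is a simplified stand-in for the utils function, not its actual implementation:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("model_name")
args, rest = parser.parse_known_args(["t5-base", "--num_beams=4", "--do_sample=True"])

def parse_numeric_n_bool_cl_kwargs(unparsed):
    # simplified stand-in: turn ["--k=v", ...] into {"k": typed_v, ...}
    kwargs = {}
    for item in unparsed:
        key, value = item.lstrip("-").split("=")
        if value in ("True", "False"):
            kwargs[key] = value == "True"
        elif value.isdigit():
            kwargs[key] = int(value)
        else:
            kwargs[key] = value
    return kwargs

print(parse_numeric_n_bool_cl_kwargs(rest))  # {'num_beams': 4, 'do_sample': True}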
"""simple docstring""" import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : str , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any]): '''simple docstring''' warnings.warn( '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use OwlViTImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
318
"""simple docstring""" from scipy.stats import pearsonr import datasets UpperCAmelCase_ : List[Any] = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ UpperCAmelCase_ : Optional[int] = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ UpperCAmelCase_ : Tuple = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False): '''simple docstring''' if return_pvalue: SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
318
1
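The metric above is a thin wrapper around scipy.stats.pearsonr; the numbers in its doctest can be reproduced directly (values taken from the docstring above):

from scipy.stats import pearsonr

r, p_value = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(r, 2), round(p_value, 2))  # -0.74 0.15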
from dataclasses import dataclass, field from typing import Optional @dataclass class A_ : _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} ) _UpperCAmelCase : Optional[str] = field( default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} ) _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} ) _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) _UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} ) _UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} ) _UpperCAmelCase : Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} ) _UpperCAmelCase : Optional[int] = field( default=10_000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} ) _UpperCAmelCase : Optional[float] = field(default=2E-4 , metadata={'''help''': '''Learning rate fo training.'''} ) _UpperCAmelCase : Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} ) _UpperCAmelCase : Optional[int] = field( default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} ) _UpperCAmelCase : Optional[int] = field( default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} ) _UpperCAmelCase : Optional[bool] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} ) _UpperCAmelCase : Optional[int] = field(default=50_000 , metadata={'''help''': '''Maximum number of training steps.'''} ) _UpperCAmelCase : Optional[int] = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} ) _UpperCAmelCase : Optional[int] = field(default=1_024 , metadata={'''help''': '''Sequence lengths used for training.'''} ) _UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} ) _UpperCAmelCase : Optional[int] = field( default=1_024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , ) _UpperCAmelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} ) _UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If True the data is pretokenized.'''} ) @dataclass class A_ : _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) _UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} ) _UpperCAmelCase : Optional[int] = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. 
If -1 the full dataset is evaluated.'''} ) _UpperCAmelCase : Optional[int] = field(default=1_024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} ) _UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) @dataclass class A_ : _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) _UpperCAmelCase : Optional[int] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) _UpperCAmelCase : Optional[int] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , ) _UpperCAmelCase : Optional[bool] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} ) _UpperCAmelCase : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} ) _UpperCAmelCase : Optional[int] = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} ) _UpperCAmelCase : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} ) _UpperCAmelCase : Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} ) _UpperCAmelCase : Optional[int] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} ) _UpperCAmelCase : Optional[int] = field( default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} ) _UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) _UpperCAmelCase : Optional[str] = field( default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} ) _UpperCAmelCase : Optional[str] = field( default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} ) _UpperCAmelCase : Optional[int] = field( default=-1 , metadata={ '''help''': ( '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive''' ''' number corresponds to which GPU device id to run on.''' ) } , ) @dataclass class A_ : _UpperCAmelCase : Optional[int] = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available.''' } , ) _UpperCAmelCase : Optional[str] = field( default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} ) _UpperCAmelCase : Optional[str] = field( default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} ) _UpperCAmelCase : Optional[int] = field( default=100_000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} ) _UpperCAmelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) _UpperCAmelCase : Optional[float] = field( default=1_000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} ) _UpperCAmelCase : Optional[float] = field( default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} ) _UpperCAmelCase : Optional[float] = field( default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} ) _UpperCAmelCase : Optional[float] = field( default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} ) _UpperCAmelCase : Optional[float] = field( default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} ) _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , ) _UpperCAmelCase : Optional[bool] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} ) _UpperCAmelCase : Optional[float] = field( default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} ) @dataclass class A_ : _UpperCAmelCase : Optional[str] = field( default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} ) _UpperCAmelCase : Optional[str] = field( default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} ) _UpperCAmelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) _UpperCAmelCase : Optional[int] = field(default=200_000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} ) _UpperCAmelCase : Optional[int] = field( default=32_768 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} ) _UpperCAmelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} ) _UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Push saved tokenizer to the hub.'''} ) @dataclass class A_ : _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} ) _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} ) _UpperCAmelCase : Optional[str] = field( default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} ) _UpperCAmelCase : Optional[int] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) @dataclass class A_ : _UpperCAmelCase : Optional[str] = field( default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for 
model initialization.'''} ) _UpperCAmelCase : Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} ) _UpperCAmelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} ) _UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
73
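Dataclass configs like the ones above are typically consumed through transformers.HfArgumentParser, which turns the fields into CLI flags. A minimal sketch under that assumption; the two-field dataclass here is a made-up miniature, not one of the configs above:

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class MiniTrainingArguments:
    model_ckpt: str = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    train_batch_size: int = field(default=2, metadata={"help": "Batch size for training."})

parser = HfArgumentParser(MiniTrainingArguments)
(args,) = parser.parse_args_into_dataclasses(args=["--train_batch_size", "4"])
print(args.model_ckpt, args.train_batch_size)  # codeparrot/codeparrot 4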
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """simple docstring"""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """simple docstring"""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    """simple docstring"""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """simple docstring"""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
2
0
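The two regex passes above are the whole trick behind camelcase_to_snakecase: insert an underscore at each case boundary, then lowercase. A quick self-contained check:

import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()

print(camelcase_to_snakecase("HTMLParser"))  # html_parser
print(camelcase_to_snakecase("squadV2"))     # squad_v2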
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=__lowercase , dtype=jnp.bfloataa) SCREAMING_SNAKE_CASE = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=__lowercase , from_pt=__lowercase , dtype=jnp.bfloataa) SCREAMING_SNAKE_CASE = controlnet_params SCREAMING_SNAKE_CASE = '''bird''' SCREAMING_SNAKE_CASE = jax.device_count() SCREAMING_SNAKE_CASE = pipe.prepare_text_inputs([prompts] * num_samples) SCREAMING_SNAKE_CASE = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png') SCREAMING_SNAKE_CASE = pipe.prepare_image_inputs([canny_image] * num_samples) SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0) SCREAMING_SNAKE_CASE = jax.random.split(__lowercase , jax.device_count()) SCREAMING_SNAKE_CASE = replicate(__lowercase) SCREAMING_SNAKE_CASE = shard(__lowercase) SCREAMING_SNAKE_CASE = shard(__lowercase) SCREAMING_SNAKE_CASE = pipe( prompt_ids=__lowercase , image=__lowercase , params=__lowercase , prng_seed=__lowercase , num_inference_steps=50 , jit=__lowercase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) SCREAMING_SNAKE_CASE = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) SCREAMING_SNAKE_CASE = images[0, 253:256, 253:256, -1] SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten())) SCREAMING_SNAKE_CASE = jnp.array( [0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78]) print(f'''output_slice: {output_slice}''') assert jnp.abs(output_slice - expected_slice).max() < 1E-2 def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=__lowercase , dtype=jnp.bfloataa) SCREAMING_SNAKE_CASE = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=__lowercase , from_pt=__lowercase , dtype=jnp.bfloataa) SCREAMING_SNAKE_CASE = controlnet_params SCREAMING_SNAKE_CASE = '''Chef in the kitchen''' SCREAMING_SNAKE_CASE = jax.device_count() SCREAMING_SNAKE_CASE = pipe.prepare_text_inputs([prompts] * num_samples) SCREAMING_SNAKE_CASE = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png') SCREAMING_SNAKE_CASE = pipe.prepare_image_inputs([pose_image] * num_samples) SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0) SCREAMING_SNAKE_CASE = jax.random.split(__lowercase , jax.device_count()) SCREAMING_SNAKE_CASE = replicate(__lowercase) SCREAMING_SNAKE_CASE = shard(__lowercase) SCREAMING_SNAKE_CASE = shard(__lowercase) SCREAMING_SNAKE_CASE = pipe( prompt_ids=__lowercase , image=__lowercase , params=__lowercase , prng_seed=__lowercase , num_inference_steps=50 , jit=__lowercase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) 
SCREAMING_SNAKE_CASE = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) SCREAMING_SNAKE_CASE = images[0, 253:256, 253:256, -1] SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten())) SCREAMING_SNAKE_CASE = jnp.array( [[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]]) print(f'''output_slice: {output_slice}''') assert jnp.abs(output_slice - expected_slice).max() < 1E-2
367
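replicate/shard is the standard flax data-parallel recipe the tests above use: parameters get a leading device axis, batches are split across devices. A toy sketch, assuming jax and flax are installed (works on any device count):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
params = {"w": jnp.ones((2, 2))}
batch = jnp.arange(n * 2 * 3).reshape(n * 2, 3)  # batch size divisible by device count

replicated = replicate(params)  # every leaf gains a leading axis of size n
sharded = shard(batch)          # batch reshaped to (n, per_device, ...)
print(replicated["w"].shape, sharded.shape)  # (n, 2, 2) (n, 2, 3)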
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ : Optional[Any] = { 'configuration_efficientnet': [ 'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EfficientNetConfig', 'EfficientNetOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = ['EfficientNetImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Union[str, Any] = [ 'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'EfficientNetForImageClassification', 'EfficientNetModel', 'EfficientNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
327
0
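The _LazyModule pattern above defers heavy imports until an attribute is first accessed. A generic sketch of the same idea with importlib (not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the owning submodule only when one of its names is requested
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

lazy = LazyModule("lazy", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(16.0))  # math is only imported here, on first access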
"""simple docstring""" import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json a : Tuple = '''sshleifer/mar_enro_6_3_student''' class __UpperCamelCase ( a__ ): def __a ( self ) -> Optional[Any]: super().setUp() a : Any = cached_path( "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=lowerCAmelCase__ , ) a : List[Any] = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k""" @slow @require_torch_gpu def __a ( self ) -> List[Any]: MarianMTModel.from_pretrained(lowerCAmelCase__ ) @slow @require_torch_gpu def __a ( self ) -> List[str]: a : Optional[int] = { "$MAX_LEN": 64, "$BS": 64, "$GAS": 1, "$ENRO_DIR": self.data_dir, "facebook/mbart-large-cc25": MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", "--learning_rate=3e-5": "--learning_rate 3e-4", "--num_train_epochs 6": "--num_train_epochs 1", } # Clean up bash script a : str = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip() a : int = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" ) for k, v in env_vars_to_replace.items(): a : int = bash_script.replace(lowerCAmelCase__ , str(lowerCAmelCase__ ) ) a : Any = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") a : List[str] = f""" --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 """.split() # XXX: args.gpus > 1 : handle multi_gpu in the future a : Dict = ["finetune.py"] + bash_script.split() + args with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ): a : List[Any] = argparse.ArgumentParser() a : Dict = pl.Trainer.add_argparse_args(lowerCAmelCase__ ) a : List[str] = SummarizationModule.add_model_specific_args(lowerCAmelCase__ , os.getcwd() ) a : str = parser.parse_args() a : Union[str, Any] = main(lowerCAmelCase__ ) # Check metrics a : List[str] = load_json(model.metrics_save_path ) a : Optional[int] = metrics["val"][0] a : Dict = metrics["val"][-1] self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , lowerCAmelCase__ ) self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats["val_avg_bleu"] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict a : int = os.listdir(lowerCAmelCase__ ) a : Tuple = [x for x in contents if x.endswith(".ckpt" )][0] a : Optional[Any] = os.path.join(args.output_dir , lowerCAmelCase__ ) a : Any = torch.load(lowerCAmelCase__ , map_location="cpu" ) a : Dict = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: a : Dict = {os.path.basename(lowerCAmelCase__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["test"] ) == 1 class __UpperCamelCase ( a__ ): @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def __a ( self ) -> Union[str, Any]: a : int = f"""{self.test_file_dir_str}/test_data/wmt_en_ro""" a : Optional[Any] = { "--fp16_opt_level=O1": "", "$MAX_LEN": 128, "$BS": 16, "$GAS": 1, "$ENRO_DIR": data_dir, "$m": "sshleifer/student_marian_en_ro_6_1", "val_check_interval=0.25": "val_check_interval=1.0", } # Clean up bash script a : Any = ( (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip() ) a : Union[str, Any] = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" ) a : Any = bash_script.replace("--fp16 " , " " ) for k, v in env_vars_to_replace.items(): a : Dict = bash_script.replace(lowerCAmelCase__ , str(lowerCAmelCase__ ) ) a : int = self.get_auto_remove_tmp_dir() a : Union[str, Any] = bash_script.replace("--fp16" , "" ) a : Optional[int] = 6 a : str = ( ["distillation.py"] + bash_script.split() + [ f"""--output_dir={output_dir}""", "--gpus=1", "--learning_rate=1e-3", f"""--num_train_epochs={epochs}""", "--warmup_steps=10", "--val_check_interval=1.0", "--do_predict", ] ) with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ): a : int = argparse.ArgumentParser() a : Optional[int] = pl.Trainer.add_argparse_args(lowerCAmelCase__ ) a : Tuple = SummarizationDistiller.add_model_specific_args(lowerCAmelCase__ , os.getcwd() ) a : List[str] = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu a : Optional[int] = distill_main(lowerCAmelCase__ ) # Check metrics a : Tuple = load_json(model.metrics_save_path ) a : Union[str, Any] = metrics["val"][0] a : List[Any] = metrics["val"][-1] assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , lowerCAmelCase__ ) # check lightning ckpt can be loaded and has a reasonable statedict a : List[str] = os.listdir(lowerCAmelCase__ ) a : Optional[Any] = [x for x in contents if x.endswith(".ckpt" )][0] a : Optional[int] = os.path.join(args.output_dir , lowerCAmelCase__ ) a : Optional[Any] = torch.load(lowerCAmelCase__ , map_location="cpu" ) a : int = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: a : Optional[int] = {os.path.basename(lowerCAmelCase__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["test"] ) == 1
105
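Both tests above drive a script's argparse entry point by temporarily patching sys.argv. The trick in isolation:

import argparse
import sys
from unittest.mock import patch

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float)
    return parser.parse_args()

with patch.object(sys, "argv", ["finetune.py", "--learning_rate", "3e-4"]):
    args = main()
print(args.learning_rate)  # 0.0003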
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """simple docstring"""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
138
0
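The close/comment decision above reduces to date arithmetic on two timestamps. A minimal check of the thresholds (more than 7 days since the last update, at least 30 since creation):

from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=10)
created_at = datetime.utcnow() - timedelta(days=45)

stale = (datetime.utcnow() - updated_at).days > 7 and (datetime.utcnow() - created_at).days >= 30
print(stale)  # True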
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def __lowerCamelCase ( self , lowercase=0 ) -> Tuple: __UpperCamelCase = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(lowercase ) ) __UpperCamelCase = np.random.RandomState(lowercase ) __UpperCamelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**lowercase ).images __UpperCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_2_8, 1_2_8, 3) __UpperCamelCase = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**lowercase ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __UpperCamelCase = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase ) # warmup pass to apply optimizations __UpperCamelCase = pipe(**self.get_dummy_inputs() ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**lowercase ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __UpperCamelCase = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**lowercase ).images __UpperCamelCase = 
image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __UpperCamelCase = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**lowercase ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __UpperCamelCase = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ) -> int: __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**lowercase ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __UpperCamelCase = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): @property def __lowerCamelCase ( self ) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase = ort.SessionOptions() __UpperCamelCase = False return options def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCamelCase = init_image.resize((7_6_8, 5_1_2) ) # using the PNDM scheduler by default __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A fantasy landscape, trending on artstation""" __UpperCamelCase = np.random.RandomState(0 ) __UpperCamelCase = pipe( prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowercase , output_type="""np""" , ) __UpperCamelCase = output.images __UpperCamelCase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 7_6_8, 3) __UpperCamelCase = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCamelCase = init_image.resize((7_6_8, 5_1_2) ) __UpperCamelCase = 
LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A fantasy landscape, trending on artstation""" __UpperCamelCase = np.random.RandomState(0 ) __UpperCamelCase = pipe( prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowercase , output_type="""np""" , ) __UpperCamelCase = output.images __UpperCamelCase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 7_6_8, 3) __UpperCamelCase = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
366
'''simple docstring'''
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name):
    '''simple docstring'''
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
243
0
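The download loop above is cursor-style pagination: fetch a page, remember the oldest id, ask for everything strictly older until a request comes back empty. The same control flow against a fake store:

def fetch_page(store, before_id, page_size=3):
    # stand-in for api.user_timeline(..., max_id=before_id)
    return [i for i in store if i < before_id][:page_size]

store = list(range(10, 0, -1))  # pretend tweet ids, newest first
collected, oldest = [], float("inf")
page = fetch_page(store, oldest)
while len(page) > 0:
    collected.extend(page)
    oldest = page[-1]                 # oldest id seen so far ...
    page = fetch_page(store, oldest)  # ... bounds the next request
print(collected)  # [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]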
'''simple docstring'''
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    '''simple docstring'''
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    '''simple docstring'''
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
229
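How the decorator above is meant to be used: the wrapped function takes batch_size as its first argument, and the decorator supplies it and halves it on OOM. A self-contained sketch with the torch/cache bookkeeping stripped out and the OOM simulated, so it runs anywhere:

import functools

def find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    def decorator(*args, **kwargs):
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "CUDA out of memory." in str(e):
                    batch_size //= 2  # halve and retry
                else:
                    raise
        raise RuntimeError("No executable batch size found, reached zero.")

    return decorator

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory.")  # simulated OOM
    return batch_size

print(train())  # 16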
'''simple docstring''' def UpperCamelCase_ ( snake_case_ : int , snake_case_ : int ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) __lowerCAmelCase = str(bin(snake_case_ ) )[2:] # remove the leading "0b" __lowerCAmelCase = str(bin(snake_case_ ) )[2:] # remove the leading "0b" __lowerCAmelCase = max(len(snake_case_ ) , len(snake_case_ ) ) return "0b" + "".join( str(int(char_a == """1""" and char_b == """1""" ) ) for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
229
1
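What the zfill alignment in the function above buys: both operands are padded to the same width before the digit-by-digit AND, so the result keeps leading zeros that Python's native & operator drops:

a, b = 25, 32
a_binary, b_binary = bin(a)[2:], bin(b)[2:]
max_len = max(len(a_binary), len(b_binary))
result = "0b" + "".join(
    str(int(x == "1" and y == "1"))
    for x, y in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
)
print(result, bin(a & b))  # 0b000000 0b0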
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 0 @slow def lowercase_ ( self : Dict ): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) # Check that tokenizer_type ≠ model_type UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A ) self.assertIsInstance(_A , _A ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' 
with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' with pytest.raises(_A ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) if isinstance(_A , _A ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A ) else: self.assertEqual(tokenizer.do_lower_case , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def lowercase_ ( self : List[str] ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values() UpperCAmelCase__ : Any = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_A ) @require_tokenizers def lowercase_ ( self : Optional[int] ): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A ) @require_tokenizers def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A ) UpperCAmelCase__ : Any = '''Hello, world. 
How are you?''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_A ) , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30_000 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_A , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_A , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase__ : Tuple = get_tokenizer_config(_A ) self.assertDictEqual(_A , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowercase_ ( self : Dict ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , slow_tokenizer_class=_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase_ ( self : Any ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) # Can register in two steps AutoTokenizer.register(_A , slow_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _A , slow_tokenizer_class=_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A ) bert_tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaises(_A ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = False class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewTokenizer lowerCAmelCase__ = False try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # If remote code is not set, the default is to use local UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def lowercase_ ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e. our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
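# A minimal sanity check, not part of the original script: assuming
# scikit-learn is available, an sklearn LogisticRegression fitted on the same
# two-feature iris subset should reach a similar decision boundary to the
# from-scratch weights above.
from sklearn import datasets
from sklearn.linear_model import LogisticRegression

iris = datasets.load_iris()
x, y = iris.data[:, :2], (iris.target != 0) * 1
reference = LogisticRegression(C=1e5).fit(x, y)  # large C: near-unregularized fit
print("sklearn training accuracy:", reference.score(x, y))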
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
"""simple docstring""" def a__ ( __SCREAMING_SNAKE_CASE ) -> int: __lowerCAmelCase: Optional[Any] = 1 for i in range(1 , num + 1 ): fact *= i return fact def a__ ( __SCREAMING_SNAKE_CASE ) -> int: __lowerCAmelCase: List[str] = 0 while number > 0: __lowerCAmelCase: Any = number % 1_0 sum_of_digits += last_digit __lowerCAmelCase: List[Any] = number // 1_0 # Removing the last_digit from the given number return sum_of_digits def a__ ( __SCREAMING_SNAKE_CASE = 1_0_0 ) -> int: __lowerCAmelCase: Tuple = factorial(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Optional[int] = split_and_add(__SCREAMING_SNAKE_CASE ) return result if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
"""simple docstring""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract __snake_case = logging.get_logger(__name__) def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : int, _lowerCAmelCase : Any ): """simple docstring""" return [ int(10_00 * (box[0] / width) ), int(10_00 * (box[1] / height) ), int(10_00 * (box[2] / width) ), int(10_00 * (box[3] / height) ), ] def A_ ( _lowerCAmelCase : np.ndarray, _lowerCAmelCase : Optional[str], _lowerCAmelCase : Optional[str] ): """simple docstring""" _a = to_pil_image(_lowerCAmelCase ) _a , _a = pil_image.size _a = pytesseract.image_to_data(_lowerCAmelCase, lang=_lowerCAmelCase, output_type='''dict''', config=_lowerCAmelCase ) _a , _a , _a , _a , _a = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates _a = [idx for idx, word in enumerate(_lowerCAmelCase ) if not word.strip()] _a = [word for idx, word in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices] _a = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices] _a = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices] _a = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices] _a = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _a = [] for x, y, w, h in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ): _a = [x, y, x + w, y + h] actual_boxes.append(_lowerCAmelCase ) # finally, normalize the bounding boxes _a = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ), "Not as many words as there are bounding boxes" return words, normalized_boxes class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : str = ['pixel_values'] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = "" , **__UpperCAmelCase , ) -> None: super().__init__(**__UpperCAmelCase ) _a = size if size is not None else {'''height''': 224, '''width''': 224} _a = get_size_dict(__UpperCAmelCase ) _a = do_resize _a = size _a = resample _a = do_rescale _a = rescale_value _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a = image_std if image_std is not None else IMAGENET_STANDARD_STD _a = apply_ocr _a = ocr_lang _a = tesseract_config def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> 
np.ndarray: _a = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' ) _a = (size['''height'''], size['''width''']) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(__UpperCAmelCase ) _a = resample if resample is not None else self.resample _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = apply_ocr if apply_ocr is not None else self.apply_ocr _a = ocr_lang if ocr_lang is not None else self.ocr_lang _a = tesseract_config if tesseract_config is not None else self.tesseract_config _a = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(__UpperCAmelCase ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) _a = [] _a = [] for image in images: _a , _a = apply_tesseract(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) words_batch.append(__UpperCAmelCase ) boxes_batch.append(__UpperCAmelCase ) if do_resize: _a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_rescale: _a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: _a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] _a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] _a = BatchFeature(data={'''pixel_values''': images} , tensor_type=__UpperCAmelCase ) if apply_ocr: _a = words_batch _a = boxes_batch return data
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> str: _a = '''ylacombe/bark-small''' _a = tempfile.mkdtemp() _a = '''en_speaker_1''' _a = '''This is a test string''' _a = '''speaker_embeddings_path.json''' _a = '''speaker_embeddings''' def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Tuple: return AutoTokenizer.from_pretrained(self.checkpoint , **__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> str: _a = self.get_tokenizer() _a = BarkProcessor(tokenizer=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _a = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _UpperCAmelCase ( self ) -> Optional[Any]: _a = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _a = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _UpperCAmelCase ( self ) -> str: _a = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _a = 35 _a = 2 _a = 8 _a = { '''semantic_prompt''': np.ones(__UpperCAmelCase ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _a = processor(text=self.input_string , voice_preset=__UpperCAmelCase ) _a = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _a = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(__UpperCAmelCase , **__UpperCAmelCase ) _a = processor(text=self.input_string , voice_preset=__UpperCAmelCase ) _a = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _a = processor(text=self.input_string , voice_preset=self.voice_preset ) def _UpperCAmelCase ( self ) -> Tuple: _a = self.get_tokenizer() _a = BarkProcessor(tokenizer=__UpperCAmelCase ) _a = processor(text=self.input_string ) _a = tokenizer( self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
"""simple docstring""" import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __A = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __A = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("""\n""".join(upper_files) + """\n""") __A = [file for file in filepaths if """ """ in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("""\n""".join(space_files) + """\n""") __A = [file for file in filepaths if """-""" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("""\n""".join(hyphen_files) + """\n""") __A = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("""\n""".join(nodir_files) + """\n""") __A = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __A = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __A = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __A = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count integer right triangles (Pythagorean triplets) by perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter with the most triplet solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
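# A quick check of the counting above, using a fact from Project Euler
# problem 39: perimeter 120 has exactly three integer right triangles,
# (20, 48, 52), (24, 45, 51) and (30, 40, 50).
assert pythagorean_triple(120)[120] == 3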
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the worldwide COVID-19 counters from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class UpperCAmelCase__ : """simple docstring""" def __init__( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=sys.maxsize ) -> List[Any]: SCREAMING_SNAKE_CASE__ = '''bilinear''' SCREAMING_SNAKE_CASE__ = max_size SCREAMING_SNAKE_CASE__ = short_edge_length def __call__( self : int , __lowerCamelCase : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ = [] for img in imgs: SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = img.shape[:2] # later: provide list and randomly choose index for resize SCREAMING_SNAKE_CASE__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img SCREAMING_SNAKE_CASE__ = size * 1.0 / min(__lowerCamelCase , __lowerCamelCase ) if h < w: SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = size, scale * w else: SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = scale * h, size if max(__lowerCamelCase , __lowerCamelCase ) > self.max_size: SCREAMING_SNAKE_CASE__ = self.max_size * 1.0 / max(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = newh * scale SCREAMING_SNAKE_CASE__ = neww * scale SCREAMING_SNAKE_CASE__ = int(neww + 0.5 ) SCREAMING_SNAKE_CASE__ = int(newh + 0.5 ) if img.dtype == np.uinta: SCREAMING_SNAKE_CASE__ = Image.fromarray(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) SCREAMING_SNAKE_CASE__ = np.asarray(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE__ = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw SCREAMING_SNAKE_CASE__ = nn.functional.interpolate( __lowerCamelCase , (newh, neww) , mode=self.interp_method , align_corners=__lowerCamelCase ).squeeze(0 ) img_augs.append(__lowerCamelCase ) return img_augs class UpperCAmelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCamelCase : Any ) -> Dict: SCREAMING_SNAKE_CASE__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) SCREAMING_SNAKE_CASE__ = cfg.INPUT.FORMAT SCREAMING_SNAKE_CASE__ = cfg.SIZE_DIVISIBILITY SCREAMING_SNAKE_CASE__ = cfg.PAD_VALUE SCREAMING_SNAKE_CASE__ = cfg.INPUT.MAX_SIZE_TEST SCREAMING_SNAKE_CASE__ = cfg.MODEL.DEVICE SCREAMING_SNAKE_CASE__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) SCREAMING_SNAKE_CASE__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) SCREAMING_SNAKE_CASE__ = lambda __lowerCamelCase : (x - self.pixel_mean) / self.pixel_std def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Tuple: SCREAMING_SNAKE_CASE__ = tuple(max(__lowerCamelCase ) for s in zip(*[img.shape for img in images] ) ) SCREAMING_SNAKE_CASE__ = [im.shape[-2:] for im in images] SCREAMING_SNAKE_CASE__ = [ nn.functional.pad( __lowerCamelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(__lowerCamelCase , __lowerCamelCase ) ] return torch.stack(__lowerCamelCase ), torch.tensor(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : str , __lowerCamelCase : Tuple=False ) -> List[str]: with torch.no_grad(): if not isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE__ = [images] if single_image: assert len(__lowerCamelCase ) == 1 for i in range(len(__lowerCamelCase ) ): if isinstance(images[i] , 
torch.Tensor ): images.insert(__lowerCamelCase , images.pop(__lowerCamelCase ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( __lowerCamelCase , torch.as_tensor(img_tensorize(images.pop(__lowerCamelCase ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge SCREAMING_SNAKE_CASE__ = torch.tensor([im.shape[:2] for im in images] ) SCREAMING_SNAKE_CASE__ = self.aug(__lowerCamelCase ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic SCREAMING_SNAKE_CASE__ = [self.normalizer(__lowerCamelCase ) for x in images] # now pad them to do the following operations SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.pad(__lowerCamelCase ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad SCREAMING_SNAKE_CASE__ = torch.true_divide(__lowerCamelCase , __lowerCamelCase ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def UpperCAmelCase_ ( _A , _A ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def UpperCAmelCase_ ( _A , _A ): '''simple docstring''' assert torch.isfinite(_A ).all(), "Box tensor contains infinite or NaN!" SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = box_size tensor[:, 0].clamp_(min=0 , max=_A ) tensor[:, 1].clamp_(min=0 , max=_A ) tensor[:, 2].clamp_(min=0 , max=_A ) tensor[:, 3].clamp_(min=0 , max=_A )
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self : str ) -> Dict: SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp() # fmt: off SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) SCREAMING_SNAKE_CASE__ = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCamelCase , __lowerCamelCase ) def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int: return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def lowercase_ ( self : str ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : List[Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase_ ( self : Optional[int] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def lowercase_ ( self : Any ) -> int: SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 ) 
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def lowercase_ ( self : List[Any] ) -> List[str]: SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' ) SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Tuple ) -> Optional[int]: SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = '''lower newer''' SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = '''lower newer''' SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(__lowerCamelCase ): processor() def lowercase_ ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowercase_ ( self : Union[str, Any] ) -> str: SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = '''lower newer''' SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class _a ( _lowerCAmelCase ): UpperCamelCase = 42 UpperCamelCase = 42 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer UpperCamelCase_ ="""bart""" UpperCamelCase_ =True @st.cache(allow_output_mutation=_lowercase ) def a_ ( ): if LOAD_DENSE_INDEX: _UpperCamelCase : Dict = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) _UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) _UpperCamelCase : Union[str, Any] = qar_model.eval() else: _UpperCamelCase , _UpperCamelCase : str = (None, None) if MODEL_TYPE == "bart": _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) _UpperCamelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) _UpperCamelCase : List[Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) _UpperCamelCase : Dict = sas_model.eval() else: _UpperCamelCase , _UpperCamelCase : List[Any] = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_lowercase ) def a_ ( ): if LOAD_DENSE_INDEX: _UpperCamelCase : List[Any] = faiss.StandardGpuResources() _UpperCamelCase : List[str] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] _UpperCamelCase : Tuple = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) _UpperCamelCase : Optional[int] = faiss.IndexFlatIP(128 ) _UpperCamelCase : Tuple = faiss.index_cpu_to_gpu(_lowercase , 1 , _lowercase ) wikiaab_gpu_index_flat.add(_lowercase ) # TODO fix for larger GPU else: _UpperCamelCase , _UpperCamelCase : Tuple = (None, None) _UpperCamelCase : List[Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_lowercase ) def a_ ( ): _UpperCamelCase : Optional[Any] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) _UpperCamelCase : Any = elia['''train_eli5'''] _UpperCamelCase : Union[str, Any] = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) _UpperCamelCase : str = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_lowercase ) return (elia_train, eli5_train_q_index) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =load_indexes() UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =load_models() UpperCamelCase_ , UpperCamelCase_ =load_train_data() def a_ ( _lowercase , _lowercase=10 ): _UpperCamelCase : Any = embed_questions_for_retrieval([question] , _lowercase , _lowercase ) _UpperCamelCase , _UpperCamelCase : List[Any] = eli5_train_q_index.search(_lowercase , _lowercase ) _UpperCamelCase : Tuple = [elia_train[int(_lowercase )] for i in I[0]] return nn_examples def a_ ( _lowercase , _lowercase="wiki40b" , _lowercase="dense" , _lowercase=10 ): if source == "none": _UpperCamelCase , _UpperCamelCase : List[str] = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), 
[]) else: if method == "dense": _UpperCamelCase , _UpperCamelCase : Dict = query_qa_dense_index( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) else: _UpperCamelCase , _UpperCamelCase : List[str] = query_es_index( _lowercase , _lowercase , index_name='''english_wiki40b_snippets_100w''' , n_results=_lowercase , ) _UpperCamelCase : Any = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] _UpperCamelCase : List[Any] = '''question: {} context: {}'''.format(_lowercase , _lowercase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _lowercase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowercase : None), } ) def a_ ( _lowercase , _lowercase , _lowercase , _lowercase=64 , _lowercase=256 , _lowercase=False , _lowercase=2 , _lowercase=0.95 , _lowercase=0.8 ): with torch.no_grad(): _UpperCamelCase : List[Any] = qa_sas_generate( _lowercase , _lowercase , _lowercase , num_answers=1 , num_beams=_lowercase , min_len=_lowercase , max_len=_lowercase , do_sample=_lowercase , temp=_lowercase , top_p=_lowercase , top_k=_lowercase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar UpperCamelCase_ ="""<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>""" UpperCamelCase_ =""" <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class=\"img-container\"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia UpperCamelCase_ =""" This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) UpperCamelCase_ =[ """Answer the question""", """View the retrieved document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] UpperCamelCase_ =st.sidebar.checkbox("""Demo options""") if demo_options: UpperCamelCase_ =st.sidebar.selectbox( """""", action_list, index=3, ) UpperCamelCase_ =action_list.index(action_st) UpperCamelCase_ =st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) UpperCamelCase_ =show_type == """Show full text of passages""" else: UpperCamelCase_ =3 UpperCamelCase_ =True UpperCamelCase_ =st.sidebar.checkbox("""Retrieval options""") if retrieval_options: UpperCamelCase_ =""" ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. 
""" st.sidebar.markdown(retriever_info) UpperCamelCase_ =st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) UpperCamelCase_ =st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: UpperCamelCase_ ="""wiki40b""" UpperCamelCase_ ="""dense""" UpperCamelCase_ ="""beam""" UpperCamelCase_ =2 UpperCamelCase_ =64 UpperCamelCase_ =256 UpperCamelCase_ =None UpperCamelCase_ =None UpperCamelCase_ =st.sidebar.checkbox("""Generation options""") if generate_options: UpperCamelCase_ =""" ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. """ st.sidebar.markdown(generate_info) UpperCamelCase_ =st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) UpperCamelCase_ =st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) UpperCamelCase_ =st.sidebar.slider( """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": UpperCamelCase_ =st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: UpperCamelCase_ =st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) UpperCamelCase_ =st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) UpperCamelCase_ =None # start main text UpperCamelCase_ =[ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What's the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What's the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] UpperCamelCase_ =st.selectbox( """What would you like to ask? 
---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": UpperCamelCase_ =st.text_input("""Enter your question here:""", """""") else: UpperCamelCase_ =question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method="""dense""", n_results=10) UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method="""sparse""", n_results=10) UpperCamelCase_ =[] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] UpperCamelCase_ =support_list[:10] UpperCamelCase_ ="""<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: UpperCamelCase_ , UpperCamelCase_ =answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): UpperCamelCase_ ="""https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_""")) UpperCamelCase_ =res[1].strip() if sec_titles == "": UpperCamelCase_ ="""[{}]({})""".format(res[0], wiki_url) else: UpperCamelCase_ =sec_titles.split(""" & """) UpperCamelCase_ =""" & """.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: UpperCamelCase_ =find_nearest_training(question) UpperCamelCase_ =nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) UpperCamelCase_ =[ """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) UpperCamelCase_ =""" --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
128
0
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowerCamelCase__ ( lowerCamelCase_ , unittest.TestCase ): a__ : Union[str, Any] = BarthezTokenizer a__ : int = BarthezTokenizerFast a__ : List[Any] = True a__ : str = True def lowerCamelCase_ ( self ): """simple docstring""" super().setUp() snake_case : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=SCREAMING_SNAKE_CASE ) snake_case : Dict = tokenizer def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Any = "<pad>" snake_case : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 101_122 ) def lowerCamelCase_ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 101_122 ) @require_torch def lowerCamelCase_ ( self ): """simple docstring""" snake_case : str = ["A long paragraph for summarization.", "Another paragraph for summarization."] snake_case : Optional[int] = [0, 57, 3_018, 70_307, 91, 2] snake_case : Union[str, Any] = self.tokenizer( SCREAMING_SNAKE_CASE , max_length=len(SCREAMING_SNAKE_CASE ) , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors="pt" ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) snake_case : Tuple = batch.input_ids.tolist()[0] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return snake_case : Union[str, Any] = self.get_tokenizer() snake_case : List[str] = self.get_rust_tokenizer() snake_case : Optional[int] = "I was born in 92000, and this is falsé." 
snake_case : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) snake_case : Optional[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : int = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) snake_case : int = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : Any = self.get_rust_tokenizer() snake_case : Any = tokenizer.encode(SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[Any] = {"input_ids": [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. snake_case : int = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=SCREAMING_SNAKE_CASE , )
148
"""simple docstring""" # Algorithm for the pigeonhole sorting def UpperCamelCase__ ( lowercase__ : List[str] ): snake_case : Tuple = min(lowercase__ ) # min() finds the minimum value snake_case : int = max(lowercase__ ) # max() finds the maximum value snake_case : List[Any] = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size snake_case : List[Any] = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(lowercase__ , lowercase__ ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. snake_case : Optional[Any] = 0 for count in range(lowercase__ ): while holes[count] > 0: holes[count] -= 1 snake_case : List[str] = count + min_val i += 1 def UpperCamelCase__ ( ): snake_case : Dict = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(lowercase__ ) print("Sorted order is:" , " ".join(lowercase__ ) ) if __name__ == "__main__": main()
148
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer _A = logging.get_logger(__name__) _A = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _A = { """vocab_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""", }, """tokenizer_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json""" ), """google/realm-orqa-nq-openqa""": ( """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-nq-reader""": ( """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-openqa""": ( """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-reader""": ( """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json""" ), }, } _A = { """google/realm-cc-news-pretrained-embedder""": 5_12, """google/realm-cc-news-pretrained-encoder""": 5_12, """google/realm-cc-news-pretrained-scorer""": 5_12, """google/realm-cc-news-pretrained-openqa""": 5_12, """google/realm-orqa-nq-openqa""": 5_12, """google/realm-orqa-nq-reader""": 5_12, """google/realm-orqa-wq-openqa""": 5_12, """google/realm-orqa-wq-reader""": 5_12, } _A = { """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-reader""": {"""do_lower_case""": True}, """google/realm-orqa-wq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-wq-reader""": 
{"""do_lower_case""": True}, } class lowerCamelCase ( a_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = RealmTokenizer def __init__(self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase="[UNK]" , _lowerCamelCase="[SEP]" , _lowerCamelCase="[PAD]" , _lowerCamelCase="[CLS]" , _lowerCamelCase="[MASK]" , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ): """simple docstring""" super().__init__( lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , ) UpperCAmelCase__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowercase_ ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowercase_ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowercase_ ) != tokenize_chinese_chars ): UpperCAmelCase__ : List[Any] = getattr(lowercase_ , normalizer_state.pop("""type""" ) ) UpperCAmelCase__ : Any = do_lower_case UpperCAmelCase__ : str = strip_accents UpperCAmelCase__ : Dict = tokenize_chinese_chars UpperCAmelCase__ : Dict = normalizer_class(**lowercase_ ) UpperCAmelCase__ : int = do_lower_case def _a (self , _lowerCamelCase , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[str] = PaddingStrategy.MAX_LENGTH UpperCAmelCase__ : Any = text UpperCAmelCase__ : int = kwargs.pop("""text_pair""" , lowercase_ ) UpperCAmelCase__ : Dict = kwargs.pop("""return_tensors""" , lowercase_ ) UpperCAmelCase__ : Union[str, Any] = { """input_ids""": [], """attention_mask""": [], """token_type_ids""": [], } for idx, candidate_text in enumerate(lowercase_ ): if batch_text_pair is not None: UpperCAmelCase__ : List[Any] = batch_text_pair[idx] else: UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : Optional[int] = super().__call__(lowercase_ , lowercase_ , return_tensors=lowercase_ , **lowercase_ ) UpperCAmelCase__ : str = encoded_candidates.get("""input_ids""" ) UpperCAmelCase__ : Any = encoded_candidates.get("""attention_mask""" ) UpperCAmelCase__ : Union[str, Any] = encoded_candidates.get("""token_type_ids""" ) if encoded_input_ids is not None: output_data["input_ids"].append(lowercase_ ) if encoded_attention_mask is not None: output_data["attention_mask"].append(lowercase_ ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(lowercase_ ) UpperCAmelCase__ : Union[str, Any] = {key: item for key, item in output_data.items() if len(lowercase_ ) != 0} return BatchEncoding(lowercase_ , tensor_type=lowercase_ ) def _a (self , _lowerCamelCase , _lowerCamelCase=None ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a (self , _lowerCamelCase , _lowerCamelCase = None ): """simple docstring""" UpperCAmelCase__ : Any = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a (self , _lowerCamelCase , _lowerCamelCase = 
None ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(lowercase_ , name=lowercase_ ) return tuple(lowercase_ )
364
"""simple docstring""" import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , *_lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ): """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) UpperCAmelCase__ : List[str] = eval_examples UpperCAmelCase__ : List[Any] = post_process_function def _a (self , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = "eval" , **_lowerCamelCase , ): """simple docstring""" UpperCAmelCase__ : Tuple = gen_kwargs.copy() UpperCAmelCase__ : Optional[Any] = ( gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length ) UpperCAmelCase__ : int = ( gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams ) UpperCAmelCase__ : int = gen_kwargs UpperCAmelCase__ : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset UpperCAmelCase__ : Tuple = self.get_eval_dataloader(_lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase__ : List[str] = self.compute_metrics UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = time.time() UpperCAmelCase__ : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase__ : int = eval_loop( _lowerCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , ) finally: UpperCAmelCase__ : Any = compute_metrics UpperCAmelCase__ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCAmelCase__ : List[str] = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = self.compute_metrics(_lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): UpperCAmelCase__ : Tuple = metrics.pop(_lowerCamelCase ) metrics.update(output.metrics ) else: UpperCAmelCase__ : List[str] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_lowerCamelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) UpperCAmelCase__ : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCamelCase ) return metrics def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase = "test" , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = gen_kwargs.copy() UpperCAmelCase__ : List[Any] = self.get_test_dataloader(_lowerCamelCase ) # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase__ : Any = self.compute_metrics UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : str = time.time() UpperCAmelCase__ : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase__ : List[str] = eval_loop( _lowerCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , ) finally: UpperCAmelCase__ : int = compute_metrics UpperCAmelCase__ : Any = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCAmelCase__ : Optional[Any] = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , """predict""" ) UpperCAmelCase__ : List[str] = self.compute_metrics(_lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): UpperCAmelCase__ : str = metrics.pop(_lowerCamelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCamelCase )
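# The evaluate/predict overrides above share one control-flow idea: stash
# compute_metrics, run the bare evaluation loop without per-batch scoring,
# and restore the metric function in a `finally` block. A minimal sketch of
# that pattern (same method names as the file above, but the helper itself
# is illustrative, not part of the Trainer API):
def run_eval_loop_sketch(trainer, dataloader, description="Evaluation"):
    saved_compute_metrics = trainer.compute_metrics
    trainer.compute_metrics = None  # disable scoring inside the loop
    try:
        output = trainer.evaluation_loop(dataloader, description=description)
    finally:
        # restored even if the loop raises, so the trainer is never left broken
        trainer.compute_metrics = saved_compute_metrics
    return output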
166
0
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class lowercase : def __init__( self ,A__ ,A__=1_3 ,A__=1_0 ,A__=3 ,A__=2 ,A__=2 ,A__=2 ,A__=True ,A__=True ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=1_0 ,A__=0.02 ,A__=0.9 ,A__=None ,): lowercase = parent lowercase = batch_size lowercase = image_size lowercase = num_channels lowercase = patch_size lowercase = tubelet_size lowercase = num_frames lowercase = is_training lowercase = use_labels lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = type_sequence_label_size lowercase = initializer_range lowercase = mask_ratio lowercase = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase = (image_size // patch_size) ** 2 lowercase = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase = int(mask_ratio * self.seq_length) def A__ ( self): lowercase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]) lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size) lowercase = self.get_config() return config, pixel_values, labels def A__ ( self): return VideoMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A__ ,initializer_range=self.initializer_range ,) def A__ ( self ,A__ ,A__ ,A__): lowercase = VideoMAEModel(config=A__) model.to(A__) model.eval() lowercase = model(A__) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size)) def A__ ( self ,A__ ,A__ ,A__): lowercase = VideoMAEForPreTraining(A__) model.to(A__) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase = torch.ones((self.num_masks,)) lowercase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) lowercase = mask.expand(self.batch_size ,-1).bool() lowercase = model(A__ ,A__) # model only returns predictions for masked 
patches lowercase = mask.sum().item() lowercase = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels)) def A__ ( self): lowercase = self.prepare_config_and_inputs() lowercase , lowercase , lowercase = config_and_inputs lowercase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): lowercase_ : str =( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowercase_ : Tuple =( {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification} if is_torch_available() else {} ) lowercase_ : List[str] =False lowercase_ : str =False lowercase_ : int =False lowercase_ : Dict =False def A__ ( self): lowercase = VideoMAEModelTester(self) lowercase = ConfigTester(self ,config_class=A__ ,has_text_modality=A__ ,hidden_size=3_7) def A__ ( self ,A__ ,A__ ,A__=False): lowercase = copy.deepcopy(A__) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase = torch.ones((self.model_tester.num_masks,)) lowercase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]) lowercase = mask.expand(self.model_tester.batch_size ,-1).bool() lowercase = bool_masked_pos.to(A__) if return_labels: if model_class in [ *get_values(A__), ]: lowercase = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A__) return inputs_dict def A__ ( self): self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''') def A__ ( self): pass def A__ ( self): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(A__) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module)) lowercase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ ,nn.Linear)) def A__ ( self): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(A__) lowercase = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase = [*signature.parameters.keys()] lowercase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,A__) def A__ ( self): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__) def A__ ( self): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A__) @slow def A__ ( self): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = VideoMAEModel.from_pretrained(A__) self.assertIsNotNone(A__) def A__ ( self): if not self.has_attentions: pass else: lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = True for model_class in self.all_model_classes: lowercase = self.model_tester.seq_length - self.model_tester.num_masks lowercase = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase = True lowercase = False lowercase = True lowercase = model_class(A__) model.to(A__) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(A__ 
,A__)) lowercase = outputs.attentions self.assertEqual(len(A__) ,self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase = True lowercase = model_class(A__) model.to(A__) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(A__ ,A__)) lowercase = outputs.attentions self.assertEqual(len(A__) ,self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,) lowercase = len(A__) # Check attention is always last and order is fine lowercase = True lowercase = True lowercase = model_class(A__) model.to(A__) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(A__ ,A__)) self.assertEqual(out_len + 1 ,len(A__)) lowercase = outputs.attentions self.assertEqual(len(A__) ,self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,) def A__ ( self): def check_hidden_states_output(A__ ,A__ ,A__): lowercase = model_class(A__) model.to(A__) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(A__ ,A__)) lowercase = outputs.hidden_states lowercase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(A__) ,A__) lowercase = self.model_tester.seq_length - self.model_tester.num_masks lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]) ,[seq_length, self.model_tester.hidden_size] ,) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = True check_hidden_states_output(A__ ,A__ ,A__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase = True check_hidden_states_output(A__ ,A__ ,A__) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def A__ ( self): pass def UpperCamelCase ( ): '''simple docstring''' lowercase = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowercase = np.load(lowerCAmelCase__ ) return list(lowerCAmelCase__ ) @require_torch @require_vision class lowercase ( unittest.TestCase ): @cached_property def A__ ( self): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def A__ ( self): lowercase = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''').to( A__) lowercase = self.default_image_processor lowercase = prepare_video() lowercase = image_processor(A__ ,return_tensors='''pt''').to(A__) # forward pass with torch.no_grad(): lowercase = model(**A__) # verify the logits lowercase = torch.Size((1, 4_0_0)) self.assertEqual(outputs.logits.shape ,A__) lowercase = torch.tensor([0.3669, -0.0688, -0.2421]).to(A__) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A__ ,atol=1E-4)) @slow def A__ ( self): lowercase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''').to(A__) lowercase = self.default_image_processor lowercase = prepare_video() lowercase = image_processor(A__ ,return_tensors='''pt''').to(A__) # add boolean mask, indicating which patches to mask lowercase = 
hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' ,filename='''bool_masked_pos.pt''') lowercase = torch.load(A__) # forward pass with torch.no_grad(): lowercase = model(**A__) # verify the logits lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6]) lowercase = torch.tensor( [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ,device=A__) self.assertEqual(outputs.logits.shape ,A__) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,A__ ,atol=1E-4)) # verify the loss (`config.norm_pix_loss` = `True`) lowercase = torch.tensor([0.5142] ,device=A__) self.assertTrue(torch.allclose(outputs.loss ,A__ ,atol=1E-4)) # verify the loss (`config.norm_pix_loss` = `False`) lowercase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ,norm_pix_loss=A__).to( A__) with torch.no_grad(): lowercase = model(**A__) lowercase = torch.tensor([0.6469] ,device=A__) self.assertTrue(torch.allclose(outputs.loss ,A__ ,atol=1E-4))
101
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Union[str, Any] = ["vqvae"] def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str: """simple docstring""" super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase ) def a__ ( self : Tuple ) -> int: """simple docstring""" return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00 @torch.no_grad() def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: """simple docstring""" __lowercase = steps or self.get_default_steps() self.scheduler.set_timesteps(_UpperCAmelCase ) __lowercase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __lowercase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __lowercase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=_UpperCAmelCase , device=self.device , ) __lowercase = noise __lowercase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase ) __lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) __lowercase = (input_image / 2_55) * 2 - 1 __lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: __lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample( generator=_UpperCAmelCase )[0] __lowercase = self.vqvae.config.scaling_factor * input_images if start_step > 0: __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] ) __lowercase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __lowercase = int(mask_start_secs * pixels_per_second ) __lowercase = int(mask_end_secs * pixels_per_second ) __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , _UpperCAmelCase ): __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample'] else: __lowercase = self.unet(_UpperCAmelCase , 
_UpperCAmelCase )['sample'] if isinstance(self.scheduler , _UpperCAmelCase ): __lowercase = self.scheduler.step( model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample'] else: __lowercase = self.scheduler.step( model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample'] if mask is not None: if mask_start > 0: __lowercase = mask[:, step, :, :mask_start] if mask_end > 0: __lowercase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __lowercase = 1 / self.vqvae.config.scaling_factor * images __lowercase = self.vqvae.decode(_UpperCAmelCase )['sample'] __lowercase = (images / 2 + 0.5).clamp(0 , 1 ) __lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() __lowercase = (images * 2_55).round().astype('uint8' ) __lowercase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) ) __lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) ) @torch.no_grad() def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray: """simple docstring""" assert isinstance(self.scheduler , _UpperCAmelCase ) self.scheduler.set_timesteps(_UpperCAmelCase ) __lowercase = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) __lowercase = (sample / 2_55) * 2 - 1 __lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): __lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __lowercase = self.scheduler.alphas_cumprod[t] __lowercase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __lowercase = 1 - alpha_prod_t __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample'] __lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output __lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor: """simple docstring""" __lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
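# The static helper at the end of the pipeline above is spherical linear
# interpolation (slerp). The same formula in isolation, with a clamp added
# for numerical safety (a sketch; the names below are illustrative):
import math

import torch


def slerp_sketch(alpha: float, x0: torch.Tensor, x1: torch.Tensor) -> torch.Tensor:
    # assumes x0 and x1 are not (anti)parallel, otherwise sin(theta) == 0
    cos_theta = torch.dot(x0.flatten(), x1.flatten()) / (x0.norm() * x1.norm())
    theta = math.acos(max(-1.0, min(1.0, cos_theta.item())))  # clamp to acos domain
    return (
        math.sin((1 - alpha) * theta) * x0 + math.sin(alpha * theta) * x1
    ) / math.sin(theta)


# alpha=0 returns x0 and alpha=1 returns x1; values in between follow the arc.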
325
0
'''simple docstring'''
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
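# Worked examples for split() above (the expected outputs are traced from
# the loop, not taken from the source): it matches str.split(separator)
# except that a trailing separator does not emit a final empty field.
assert split("a b c") == ["a", "b", "c"]  # same as "a b c".split(" ")
assert split("a b ") == ["a", "b"]        # str.split would give ["a", "b", ""]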
46
'''simple docstring'''
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
46
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = None _lowerCamelCase = None _lowerCamelCase = None _lowerCamelCase = None class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase=1 , UpperCamelCase=0 , UpperCamelCase=2 , UpperCamelCase=512 , UpperCamelCase="cls" , UpperCamelCase=False , UpperCamelCase=True , **UpperCamelCase , ): """simple docstring""" super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCamelCase_ = project_dim lowerCamelCase_ = pooler_fn lowerCamelCase_ = learn_encoder lowerCamelCase_ = use_attention_mask class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = [R"pooler", R"logit_scale"] _lowerCamelCase = [R"position_ids", R"predictions.decoder.bias"] _lowerCamelCase = "roberta" _lowerCamelCase = RobertaSeriesConfig def __init__( self , UpperCamelCase ): """simple docstring""" super().__init__(__A ) lowerCamelCase_ = XLMRobertaModel(__A ) lowerCamelCase_ = nn.Linear(config.hidden_size , config.project_dim ) lowerCamelCase_ = getattr(__A , "has_pre_transformation" , __A ) if self.has_pre_transformation: lowerCamelCase_ = nn.Linear(config.hidden_size , config.project_dim ) lowerCamelCase_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def snake_case ( self , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , ): """simple docstring""" lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase_ = self.base_model( input_ids=__A , attention_mask=__A , token_type_ids=__A , position_ids=__A , head_mask=__A , inputs_embeds=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , output_attentions=__A , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__A , ) if self.has_pre_transformation: lowerCamelCase_ = outputs["hidden_states"][-2] lowerCamelCase_ = self.pre_LN(__A ) lowerCamelCase_ = self.transformation_pre(__A ) return TransformationModelOutput( projection_state=__A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: lowerCamelCase_ = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
55
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' if "model" in orig_key: lowerCamelCase : Dict = orig_key.replace("model." , "" ) if "norm1" in orig_key: lowerCamelCase : Union[str, Any] = orig_key.replace("norm1" , "attention.output.LayerNorm" ) if "norm2" in orig_key: lowerCamelCase : Union[str, Any] = orig_key.replace("norm2" , "output.LayerNorm" ) if "norm" in orig_key: lowerCamelCase : Optional[Any] = orig_key.replace("norm" , "LayerNorm" ) if "transformer" in orig_key: lowerCamelCase : int = orig_key.split("." )[0].split("_" )[-1] lowerCamelCase : Dict = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" ) if "mha.attn" in orig_key: lowerCamelCase : List[str] = orig_key.replace("mha.attn" , "attention.self" ) if "mha" in orig_key: lowerCamelCase : List[Any] = orig_key.replace("mha" , "attention" ) if "W_q" in orig_key: lowerCamelCase : Optional[int] = orig_key.replace("W_q" , "self.query" ) if "W_k" in orig_key: lowerCamelCase : List[Any] = orig_key.replace("W_k" , "self.key" ) if "W_v" in orig_key: lowerCamelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" ) if "ff1" in orig_key: lowerCamelCase : Union[str, Any] = orig_key.replace("ff1" , "intermediate.dense" ) if "ff2" in orig_key: lowerCamelCase : Optional[int] = orig_key.replace("ff2" , "output.dense" ) if "ff" in orig_key: lowerCamelCase : Optional[int] = orig_key.replace("ff" , "output.dense" ) if "mlm_class" in orig_key: lowerCamelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" ) if "mlm" in orig_key: lowerCamelCase : List[Any] = orig_key.replace("mlm" , "cls.predictions.transform" ) if "cls" not in orig_key: lowerCamelCase : int = "yoso." + orig_key return orig_key def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCamelCase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if ("pooler" in key) or ("sen_class" in key): continue else: lowerCamelCase : Dict = val lowerCamelCase : Dict = orig_state_dict["cls.predictions.decoder.bias"] lowerCamelCase : Dict = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2 return orig_state_dict def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : List[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model_state_dict"] lowerCamelCase : List[str] = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Any = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : List[Any] = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ ) print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) ) model.eval() model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"""Checkpoint successfully
Model saved at {pytorch_dump_path}""" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for YOSO model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _snake_case = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
283
0
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] __UpperCamelCase = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def _a ( _lowerCamelCase ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" ) return sd def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=rename_keys_prefix ) -> str: """simple docstring""" __snake_case : Optional[Any] = OrderedDict() __snake_case : Optional[Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __snake_case : Optional[int] = key for name_pair in rename_keys_prefix: __snake_case : str = new_key.replace(name_pair[0] , name_pair[1] ) __snake_case : List[str] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __snake_case : List[Any] = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" assert ( checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: __snake_case : Tuple = """pretraining""" if "vcr" in checkpoint_path: __snake_case : Tuple = {"""visual_embedding_dim""": 512} elif "vqa_advanced" in checkpoint_path: __snake_case : int = {"""visual_embedding_dim""": 2048} elif "vqa" in checkpoint_path: __snake_case : Optional[int] = {"""visual_embedding_dim""": 2048} elif "nlvr" in checkpoint_path: __snake_case : str = {"""visual_embedding_dim""": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: __snake_case : str = {"""visual_embedding_dim""": 512} __snake_case : Dict = """multichoice""" elif "vqa_advanced" in checkpoint_path: __snake_case : int = {"""visual_embedding_dim""": 2048} __snake_case : Dict = """vqa_advanced""" elif "vqa" in checkpoint_path: __snake_case : Dict = {"""visual_embedding_dim""": 2048, """num_labels""": 3129} __snake_case : List[Any] = """vqa""" elif "nlvr" in checkpoint_path: __snake_case : List[Any] = { """visual_embedding_dim""": 1024, """num_labels""": 2, } __snake_case : int = """nlvr""" __snake_case : Optional[Any] = VisualBertConfig(**_lowerCamelCase ) # Load State Dict __snake_case : List[Any] = load_state_dict(_lowerCamelCase ) __snake_case : Tuple = get_new_dict(_lowerCamelCase , _lowerCamelCase ) if model_type == "pretraining": __snake_case : Tuple = VisualBertForPreTraining(_lowerCamelCase ) elif model_type == "vqa": __snake_case : List[str] = 
VisualBertForQuestionAnswering(_lowerCamelCase ) elif model_type == "nlvr": __snake_case : Tuple = VisualBertForVisualReasoning(_lowerCamelCase ) elif model_type == "multichoice": __snake_case : Optional[Any] = VisualBertForMultipleChoice(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) # Save Checkpoints Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") __UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
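# The conversion above is essentially one renaming pass over a checkpoint's
# state dict, driven by (old, new) prefix pairs. The core idea in isolation
# (a sketch with illustrative names, not the script's exact helper):
from collections import OrderedDict


def rename_state_dict_sketch(state_dict: dict, name_pairs: list) -> OrderedDict:
    renamed = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in name_pairs:  # apply every rename rule in order
            new_key = new_key.replace(old, new)
        renamed[new_key] = value
    return renamed


# e.g. rename_state_dict_sketch({"bert.bert.embeddings.w": 0},
#                               [("bert.bert", "visual_bert")])
# -> OrderedDict({"visual_bert.embeddings.w": 0})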
13
'''simple docstring'''
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen with the same value, because if there
        # is it means there is a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
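# The two diagonal formulas used above can be checked in isolation: queens at
# (row, col) and (r, c) share the "\" diagonal iff row - col == r - c, and the
# "/" diagonal iff row + col == r + c (a sketch; the names are illustrative):
def collides_sketch(row: int, col: int, queens: list) -> bool:
    return any(
        col == c  # same column
        or row - col == r - c  # same "\" diagonal
        or row + col == r + c  # same "/" diagonal
        for r, c in queens
    )


# (0, 1) and (2, 3) lie on one "\" diagonal: 0 - 1 == 2 - 3 == -1.
assert collides_sketch(2, 3, [(0, 1)])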
13
1
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES __UpperCamelCase : Optional[int] = '''tiny-wmt19-en-ru''' # Build # borrowed from a test __UpperCamelCase : Tuple = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] __UpperCamelCase : Dict = dict(zip(vocab, range(len(vocab)))) __UpperCamelCase : int = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase : Optional[Any] = Path(tmpdirname) __UpperCamelCase : Optional[int] = build_dir / VOCAB_FILES_NAMES['''src_vocab_file'''] __UpperCamelCase : List[Any] = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file'''] __UpperCamelCase : Any = build_dir / VOCAB_FILES_NAMES['''merges_file'''] with open(src_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, '''w''') as fp: fp.write('''\n'''.join(merges)) __UpperCamelCase : List[Any] = FSMTTokenizer( langs=['''en''', '''ru'''], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) __UpperCamelCase : Union[str, Any] = FSMTConfig( langs=['''ru''', '''en'''], src_vocab_size=1_0_0_0, tgt_vocab_size=1_0_0_0, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) __UpperCamelCase : Optional[int] = FSMTForConditionalGeneration(config) print(F'''num of params {tiny_model.num_parameters()}''') # Test __UpperCamelCase : int = tokenizer(['''Making tiny model'''], return_tensors='''pt''') __UpperCamelCase : str = tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F'''Generated {mname_tiny}''') # Upload # transformers-cli upload tiny-wmt19-en-ru
106
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process snake_case__ : Dict = logging.getLogger(__name__) def _snake_case ( _snake_case : Any , _snake_case : Any ): return (preds == labels).mean() @dataclass class snake_case_: __UpperCamelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class snake_case_: __UpperCamelCase = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) __UpperCamelCase = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) __UpperCamelCase = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCAmelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _snake_case ) # Set seed set_seed(training_args.seed ) try: lowerCAmelCase : Tuple = processors[data_args.task_name]() lowerCAmelCase : Any = processor.get_labels() lowerCAmelCase : Union[str, Any] = len(_snake_case ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCAmelCase : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCAmelCase : List[str] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , ) # Get datasets lowerCAmelCase : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowerCAmelCase : Any = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_snake_case : EvalPrediction ) -> Dict: lowerCAmelCase : int = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_snake_case , p.label_ids )} # Data collator lowerCAmelCase : List[Any] = DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowerCAmelCase : Union[str, Any] = Trainer( model=_snake_case , args=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , compute_metrics=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowerCAmelCase : int = {} if training_args.do_eval: 
logger.info('''*** Evaluate ***''' ) lowerCAmelCase : Any = trainer.evaluate() lowerCAmelCase : int = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(_snake_case , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , _snake_case , _snake_case ) writer.write('''%s = %s\n''' % (key, value) ) results.update(_snake_case ) return results def _snake_case ( _snake_case : List[str] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
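# A hedged example invocation of the script above; the flags come straight from
# the ModelArguments / DataTrainingArguments / TrainingArguments dataclasses it
# parses, while the task name and data path are placeholders to substitute:
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir /path/to/swag \
#       --output_dir ./mc-output \
#       --max_seq_length 128 \
#       --do_train --do_eval --overwrite_output_dir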
60
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
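# A short usage sketch, assuming the upstream class names LevitConfig/LevitModel
# from transformers; note that down_ops is derived inside __init__ from key_dim
# and hidden_sizes rather than passed in.
from transformers import LevitConfig, LevitModel

config = LevitConfig(hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12])
model = LevitModel(config)
print(config.down_ops)  # the two "Subsample" stages computed above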
361
import argparse
import os

import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": ["ResnetDownsampleBlock2D", "AttnDownBlock2D"],
    "up_block_types": ["AttnUpBlock2D", "ResnetUpsampleBlock2D"],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # target key names follow diffusers' ResnetBlock2D module naming
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # the fused qkv projection is split into diffusers' separate to_q/to_k/to_v
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()

    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
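# A hedged example invocation; the three flags are exactly the argparse options
# defined above, and the checkpoint filename is a placeholder whose substrings
# ("cd"/"ct" plus "imagenet64", "256"+"bedroom"/"cat", or "test") drive the
# config selection logic in the __main__ block:
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path /path/to/cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True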
129
0
"""simple docstring""" def lowerCamelCase__ ( _lowerCamelCase : List[str] ) -> Optional[int]: lowerCamelCase_ = len(_lowerCamelCase ) for i in range(length - 1 ): lowerCamelCase_ = i for k in range(i + 1 , _lowerCamelCase ): if collection[k] < collection[least]: lowerCamelCase_ = k if least != i: lowerCamelCase_ , lowerCamelCase_ = (collection[i], collection[least]) return collection if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip() _SCREAMING_SNAKE_CASE : Optional[Any] = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
183
"""simple docstring""" from cva import destroyAllWindows, imread, imshow, waitKey def lowerCamelCase__ ( _lowerCamelCase : Tuple ) -> Dict: # getting number of pixels in the image lowerCamelCase_ , lowerCamelCase_ = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(_lowerCamelCase ): for j in range(_lowerCamelCase ): lowerCamelCase_ = [255, 255, 255] - img[i][j] return img if __name__ == "__main__": # read original image _SCREAMING_SNAKE_CASE : List[Any] = imread('''image_data/lena.jpg''', 1) # convert to its negative _SCREAMING_SNAKE_CASE : List[Any] = convert_to_negative(img) # show result image imshow('''negative of original image''', img) waitKey(0) destroyAllWindows()
183
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
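# A short usage sketch for the config above, assuming the upstream class name
# IBertConfig from transformers; quant_mode and force_dequant are the two
# I-BERT-specific knobs on top of the usual RoBERTa-style fields.
from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="gelu")
print(config.quant_mode, config.force_dequant)  # True gelu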
361
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Pure implementation of the cocktail shaker sort algorithm in Python.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
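# A quick note on the two passes above: the backward pass bubbles the smallest
# remaining element to the left, the forward pass bubbles the largest to the
# right, and the `swapped` flag makes the algorithm O(n) on already-sorted
# input (O(n^2) worst case, like bubble sort).
assert cocktail_shaker_sort([3, 1, 2, 1]) == [1, 1, 2, 3]
assert cocktail_shaker_sort([]) == []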
44
0