Dataset schema (one row per sample):

    column                    type      range / lengths
    code                      string    87 to 55.2k characters
    code_codestyle            int64     0 to 349
    style_context             string    135 to 49.1k characters
    style_context_codestyle   int64     0 to 349
    label                     int64     0 or 1
"""simple docstring""" import argparse import copy def _snake_case ( snake_case__ : Dict ): A = {} with open(snake_case__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: A = [] _list.append([line.split()[1], line.split()[2]] ) A = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: A = [] _list.append([line.split()[0], line.split()[2]] ) A = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ): with open(snake_case__ ) as f: A = f.read(1 ) A = start_node A = [] A = start_node A = 0 while visiting not in first_solution: A = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case__ ) and k[0] not in first_solution: A = k[1] A = k[0] first_solution.append(snake_case__ ) A = distance_of_first_solution + int(snake_case__ ) A = best_node first_solution.append(snake_case__ ) A = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 A = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Tuple ): A = [] for n in solution[1:-1]: A = solution.index(snake_case__ ) for kn in solution[1:-1]: A = solution.index(snake_case__ ) if n == kn: continue A = copy.deepcopy(snake_case__ ) A = kn A = n A = 0 for k in _tmp[:-1]: A = _tmp[_tmp.index(snake_case__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: A = distance + int(i[1] ) _tmp.append(snake_case__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) A = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _snake_case ( snake_case__ : str , snake_case__ : Any , snake_case__ : Any , snake_case__ : Dict , snake_case__ : List[str] ): A = 1 A = first_solution A = [] A = distance_of_first_solution A = solution while count <= iters: A = find_neighborhood(snake_case__ , snake_case__ ) A = 0 A = neighborhood[index_of_best_solution] A = len(snake_case__ ) - 1 A = False while not found: A = 0 while i < len(snake_case__ ): if best_solution[i] != solution[i]: A = best_solution[i] A = solution[i] break A = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) A = True A = best_solution[:-1] A = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: A = cost A = solution else: A = index_of_best_solution + 1 A = neighborhood[index_of_best_solution] if len(snake_case__ ) >= size: tabu_list.pop(0 ) A = count + 1 return best_solution_ever, best_cost def _snake_case ( snake_case__ : Tuple=None ): A = generate_neighbours(args.File ) A , A = generate_first_solution( args.File , snake_case__ ) A , A = tabu_search( snake_case__ , snake_case__ , snake_case__ , args.Iterations , args.Size , ) print(F'Best solution: {best_sol}, with total distance: {best_cost}.' 
) if __name__ == "__main__": _lowercase = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
"""simple docstring""" import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _lowercase = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) _lowercase = '''sshleifer/student_marian_en_ro_6_1''' _lowercase = '''sshleifer/tiny-mbart''' @require_torch class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple: A = self.run_trainer( eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,) A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history if not do_eval: return A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] ,A_ ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: self.run_seqaseq_quick() @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : int ) -> int: self.run_seqaseq_quick(distributed=A_ ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: self.run_seqaseq_quick( distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ ) @require_apex @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) @parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } A = experiments[experiment_id] A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} A = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] ) A = len(re.findall(A_ ,cl.err ) ) self.assertEqual(A_ ,data['n_matches'] ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = self.run_trainer( eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,) # Check metrics A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] A = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] ,A_ ) # test if do_predict saves generations and metrics A = os.listdir(A_ ) A = {os.path.basename(A_ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: from transformers.training_args import OptimizerNames def train_and_return_metrics(A_ : str ) -> Tuple[int, float]: A = '--skip_memory_metrics 0' A = self.run_trainer( max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,) # Check metrics A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) A = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A = 
train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A = gpu_peak_mem_orig + gpu_alloc_mem_orig A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and' F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,) self.assertGreater( A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and' F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,) self.assertEqual( A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict: A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' A = self.get_auto_remove_tmp_dir() A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split() A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split() A = '\n --do_predict\n '.split() A = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'--optim {optim}'.split() if extra_args_str is not None: args += 
extra_args_str.split() if distributed: if n_gpus_to_use is None: A = get_gpu_count() A = get_torch_dist_unique_port() A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split() A = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A_ ,env=self.get_env() ) else: A = ['run_translation.py'] + args with patch.object(A_ ,'argv' ,A_ ): main() return output_dir
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ,A_ : int ) -> Union[str, Any]: A = n A = [None] * self.n A = 0 # index of the first element A = 0 A = 0 def __len__( self : int ) -> int: return self.size def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.size == 0 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: return False if self.is_empty() else self.array[self.front] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int: if self.size >= self.n: raise Exception('QUEUE IS FULL' ) A = data A = (self.rear + 1) % self.n self.size += 1 return self def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: if self.size == 0: raise Exception('UNDERFLOW' ) A = self.array[self.front] A = None A = (self.front + 1) % self.n self.size -= 1 return temp
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''deit''' def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = encoder_stride class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: int = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float: return 1e-4
"""simple docstring""" import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = '''''' _lowerCamelCase: str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowerCamelCase: str = None # compression type in fsspec. ex: "gzip" _lowerCamelCase: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : Union[str, Any] ,A_ : str = "" ,A_ : Optional[str] = None ,A_ : Optional[dict] = None ,**A_ : int ) -> Optional[int]: super().__init__(self ,**A_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode A = fsspec.open( A_ ,mode='rb' ,protocol=A_ ,compression=self.compression ,client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs' ,{} ), # To avoid issues if it was already passed. } ,**(target_options or {}) ,) A = os.path.basename(self.file.path.split('::' )[0] ) A = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) A = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Dict: # compressed file paths are always relative to the archive root return super()._strip_protocol(A_ ).lstrip('/' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: if self.dir_cache is None: A = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} A = {f['name']: f} def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ) -> Dict: return self.file.open().read() def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ,A_ : str = "rb" ,A_ : List[str]=None ,A_ : Optional[int]=True ,A_ : Dict=None ,**A_ : int ,) -> Tuple: A = self._strip_protocol(A_ ) if mode != "rb": raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' ) return self.file.open() class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = '''bz2''' _lowerCamelCase: Tuple = '''bz2''' _lowerCamelCase: Tuple = '''.bz2''' class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = '''gzip''' _lowerCamelCase: str = '''gzip''' _lowerCamelCase: Dict = '''.gz''' class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = '''lz4''' _lowerCamelCase: Dict = '''lz4''' _lowerCamelCase: List[Any] = '''.lz4''' class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = '''xz''' _lowerCamelCase: Tuple = '''xz''' _lowerCamelCase: List[str] = '''.xz''' class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = '''zstd''' _lowerCamelCase: int = '''zstd''' _lowerCamelCase: str = '''.zst''' def __init__( self : Optional[Any] ,A_ : str ,A_ : str = "rb" ,A_ : Optional[str] = None ,A_ : Optional[dict] = None ,A_ : int = DEFAULT_BLOCK_SIZE ,**A_ : int ,) -> int: super().__init__( fo=A_ ,mode=A_ ,target_protocol=A_ ,target_options=A_ ,block_size=A_ ,**A_ ,) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 A = self.file.__enter__ class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : int ) -> Any: A = file_ def __enter__( self : Union[str, Any] ) -> Tuple: self._file.__enter__() return self def __exit__( self : Dict ,*A_ : Dict ,**A_ : List[str] ) -> int: self._file.__exit__(*A_ ,**A_ ) def __iter__( self : Optional[int] ) -> Optional[int]: return iter(self._file ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: return next(self._file ) def __getattr__( self : Optional[Any] ,A_ : int ) -> List[Any]: return getattr(self._file ,A_ ) def fixed_enter(*A_ : List[Any] ,**A_ : List[str] ): return WrappedFile(_enter(*A_ ,**A_ ) ) A = fixed_enter
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ : Dict ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) A = [] for i in range(snake_case__ ): A = i / num_diffusion_timesteps A = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) ) return torch.tensor(snake_case__ , dtype=torch.floataa ) class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers] _lowerCamelCase: Optional[Any] = 2 @register_to_config def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]: if trained_betas is not None: A = torch.tensor(A_ ,dtype=torch.floataa ) elif beta_schedule == "linear": A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. A = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' ) elif beta_schedule == "exp": A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) A = 1.0 - self.betas A = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(A_ ,A_ ,A_ ) A = use_karras_sigmas def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple: if schedule_timesteps is None: A = self.timesteps A = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: A = 1 if len(A_ ) > 1 else 0 else: A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep A = self._index_counter[timestep_int] return indices[pos].item() @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor: A = self.index_for_timestep(A_ ) A = self.sigmas[step_index] A = sample / ((sigma**2 + 1) ** 0.5) return sample def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]: A = num_inference_steps A = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy() elif self.config.timestep_spacing == "leading": A = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": A = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ ) timesteps -= 1 else: raise ValueError( F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' 
) A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) A = np.log(A_ ) A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ ) if self.config.use_karras_sigmas: A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps ) A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] ) A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) A = torch.from_numpy(A_ ).to(device=A_ ) A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) A = torch.from_numpy(A_ ) A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(A_ ).startswith('mps' ): # mps does not support float64 A = timesteps.to(A_ ,dtype=torch.floataa ) else: A = timesteps.to(device=A_ ) # empty dt and derivative A = None A = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter A = defaultdict(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict: # get log sigma A = np.log(A_ ) # get distribution A = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) A = low_idx + 1 A = log_sigmas[low_idx] A = log_sigmas[high_idx] # interpolate sigmas A = (low - log_sigma) / (low - high) A = np.clip(A_ ,0 ,1 ) # transform interpolation to time range A = (1 - w) * low_idx + w * high_idx A = t.reshape(sigma.shape ) return t def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor: A = in_sigmas[-1].item() A = in_sigmas[0].item() A = 7.0 # 7.0 is the value used in the paper A = np.linspace(0 ,1 ,A_ ) A = sigma_min ** (1 / rho) A = sigma_max ** (1 / rho) A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.dt is None def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]: A = self.index_for_timestep(A_ ) # advance index counter by 1 A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: A = self.sigmas[step_index] A = self.sigmas[step_index + 1] else: # 2nd order / Heun's method A = self.sigmas[step_index - 1] A = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API A = 0 A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": A = sigma_hat if self.state_in_first_order else sigma_next A = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": A = sigma_hat if self.state_in_first_order else sigma_next A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": A = model_output else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.config.clip_sample: A = pred_original_sample.clamp( -self.config.clip_sample_range ,self.config.clip_sample_range ) if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order A = (sample - pred_original_sample) / sigma_hat # 3. delta timestep A = sigma_next - sigma_hat # store for 2nd order step A = derivative A = dt A = sample else: # 2. 2nd order / Heun's method A = (sample - pred_original_sample) / sigma_next A = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample A = self.dt A = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" A = None A = None A = None A = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A_ ): # mps does not support float64 A = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) A = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: A = self.timesteps.to(original_samples.device ) A = timesteps.to(original_samples.device ) A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps] A = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): A = sigma.unsqueeze(-1 ) A = original_samples + noise * sigma return noisy_samples def __len__( self : Dict ) -> int: return self.config.num_train_timesteps
"""simple docstring""" import itertools import string from collections.abc import Generator, Iterable def _snake_case ( snake_case__ : Iterable[str] , snake_case__ : int ): A = iter(snake_case__ ) while True: A = tuple(itertools.islice(snake_case__ , snake_case__ ) ) if not chunk: return yield chunk def _snake_case ( snake_case__ : str ): A = ''.join([c.upper() for c in dirty if c in string.ascii_letters] ) A = '' if len(snake_case__ ) < 2: return dirty for i in range(len(snake_case__ ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(snake_case__ ) & 1: clean += "X" return clean def _snake_case ( snake_case__ : str ): # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) A = 'ABCDEFGHIKLMNOPQRSTUVWXYZ' # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler A = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(snake_case__ ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(snake_case__ ) return table def _snake_case ( snake_case__ : str , snake_case__ : str ): A = generate_table(snake_case__ ) A = prepare_input(snake_case__ ) A = '' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(snake_case__ , 2 ): A , A = divmod(table.index(snake_case__ ) , 5 ) A , A = divmod(table.index(snake_case__ ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def _snake_case ( snake_case__ : str , snake_case__ : str ): A = generate_table(snake_case__ ) A = '' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(snake_case__ , 2 ): A , A = divmod(table.index(snake_case__ ) , 5 ) A , A = divmod(table.index(snake_case__ ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : list[int] ) -> None: A = len(A_ ) A = [0] * len_array if len_array > 0: A = array[0] for i in range(1 ,A_ ): A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool: A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(A_ ) return False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from graphs.minimum_spanning_tree_kruskal import kruskal def _snake_case ( ): A = 9 A = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] A = kruskal(snake_case__ , snake_case__ ) A = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(snake_case__ ) == sorted(snake_case__ )
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = RobertaPreLayerNormConfig.from_pretrained( snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) ) A = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue A = tensor_value A = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) model.save_pretrained(snake_case__ ) # convert tokenizer A = AutoTokenizer.from_pretrained(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[Any]=13 ,A_ : List[Any]=30 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=3 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : List[str]=32 ,A_ : str=2 ,A_ : str=4 ,A_ : int=37 ,A_ : Tuple="gelu" ,A_ : Any=0.1 ,A_ : int=0.1 ,A_ : str=10 ,A_ : List[str]=0.02 ,A_ : int=3 ,A_ : List[Any]=None ,A_ : int=2 ,) -> Dict: A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope A = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A = (image_size // patch_size) ** 2 A = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Any ,A_ : Any ,A_ : int ) -> int: A = TFDeiTModel(config=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = TFDeiTForMaskedImageModeling(config=A_ ) A = model(A_ ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A = 1 A = TFDeiTForMaskedImageModeling(A_ ) A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(A_ ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : str ,A_ : Tuple 
) -> List[Any]: A = self.type_sequence_label_size A = TFDeiTForImageClassification(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = TFDeiTForImageClassification(A_ ) A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _lowerCamelCase: Dict = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: Dict = False _lowerCamelCase: Any = False _lowerCamelCase: Dict = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = TFDeiTModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: pass def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ ,tf.keras.layers.Dense ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[str] ,A_ : Any ,A_ : List[Any]=False ) -> str: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFDeiTModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image 
@require_tf @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ) A = self.default_image_processor A = prepare_img() A = image_processor(images=A_ ,return_tensors='tf' ) # forward pass A = model(**A_ ) # verify the logits A = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape ,A_ ) A = tf.constant([-1.02_66, 0.19_12, -1.28_61] ) self.assertTrue(np.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''roformer''' def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size if embedding_size is None else embedding_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = rotary_value A = use_cache class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _snake_case ( snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[Any] ): if isinstance(snake_case__ , snake_case__ ): A = np.full((len(snake_case__ ), sequence_length, 2) , snake_case__ ) else: A = np.full((len(snake_case__ ), sequence_length) , snake_case__ ) for i, tensor in enumerate(snake_case__ ): if padding_side == "right": if isinstance(snake_case__ , snake_case__ ): A = tensor[:sequence_length] else: A = tensor[:sequence_length] else: if isinstance(snake_case__ , snake_case__ ): A = tensor[:sequence_length] else: A = tensor[:sequence_length] return out_tensor.tolist() def _snake_case ( snake_case__ : Tuple ): A = ord(snake_case__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True A = unicodedata.category(snake_case__ ) if cat.startswith('P' ): return True return False @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: PreTrainedTokenizerBase _lowerCamelCase: Union[bool, str, PaddingStrategy] = True _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: int = -100 _lowerCamelCase: str = "pt" def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ) -> Any: import torch A = 'label' if 'label' in features[0].keys() else 'labels' A = [feature[label_name] for feature in features] if label_name in features[0].keys() else None A = self.tokenizer.pad( A_ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' if labels is None else None ,) if labels is None: return batch A = torch.tensor(batch['entity_ids'] ).shape[1] A = self.tokenizer.padding_side if padding_side == "right": A = [ list(A_ ) + [self.label_pad_token_id] * (sequence_length - len(A_ )) for label in labels ] else: A = [ [self.label_pad_token_id] * (sequence_length - len(A_ )) + list(A_ ) for label in labels ] A = [feature['ner_tags'] for feature in features] A = padding_tensor(A_ ,-1 ,A_ ,A_ ) A = [feature['original_entity_spans'] for feature in features] A = padding_tensor(A_ ,(-1, -1) ,A_ ,A_ ) A = {k: torch.tensor(A_ ,dtype=torch.intaa ) for k, v in batch.items()} return batch
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : Dict ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def _snake_case ( snake_case__ : Union[str, Any] ): A = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Any = StableDiffusionLatentUpscalePipeline _lowerCamelCase: Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''height''', '''width''', '''cross_attention_kwargs''', '''negative_prompt_embeds''', '''prompt_embeds''', } _lowerCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''} _lowerCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _lowerCamelCase: Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _lowerCamelCase: Optional[int] = frozenset([] ) _lowerCamelCase: Tuple = True @property def _SCREAMING_SNAKE_CASE ( self : str ) -> str: A = 1 A = 4 A = (16, 16) A = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(A_ ) return image def _SCREAMING_SNAKE_CASE ( self : int ) -> str: torch.manual_seed(0 ) A = UNetaDConditionModel( act_fn='gelu' ,attention_head_dim=8 ,norm_num_groups=A_ ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=160 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=( 'KDownBlock2D', 'KCrossAttnDownBlock2D', 'KCrossAttnDownBlock2D', 'KCrossAttnDownBlock2D', ) ,in_channels=8 ,mid_block_type=A_ ,only_cross_attention=A_ ,out_channels=5 ,resnet_time_scale_shift='scale_shift' ,time_embedding_type='fourier' ,timestep_post_act='gelu' ,up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') ,) A = AutoencoderKL( block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[ 'DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D', ] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) A = EulerDiscreteScheduler(prediction_type='sample' ) A = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='quick_gelu' ,projection_dim=512 ,) A = CLIPTextModel(A_ ) A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A = { 'unet': model.eval(), 'vae': vae.eval(), 'scheduler': scheduler, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any]=0 ) -> List[Any]: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: 
A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': self.dummy_image.cpu(), 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = 'cpu' A = self.get_dummy_components() A = self.pipeline_class(**A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] self.assertEqual(image.shape ,(1, 256, 256, 3) ) A = np.array( [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] ) A = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A_ ,1e-3 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=7e-3 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: super().test_save_load_local(expected_max_difference=3e-3 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: super().test_save_load_optional_components(expected_max_difference=3e-3 ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = [ 'DDIMScheduler', 'DDPMScheduler', 'PNDMScheduler', 'HeunDiscreteScheduler', 'EulerAncestralDiscreteScheduler', 'KDPM2DiscreteScheduler', 'KDPM2AncestralDiscreteScheduler', 'DPMSolverSDEScheduler', ] A = self.get_dummy_components() A = self.pipeline_class(**A_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 2 A = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue A = getattr(A_ ,scheduler_enum.name ) A = scheduler_cls.from_config(pipe.scheduler.config ) A = pipe(**A_ )[0] outputs.append(A_ ) assert check_same_shape(A_ ) @require_torch_gpu @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: A = torch.manual_seed(33 ) A = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ,torch_dtype=torch.floataa ) pipe.to('cuda' ) A = StableDiffusionLatentUpscalePipeline.from_pretrained( 'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.floataa ) upscaler.to('cuda' ) A = 'a photo of an astronaut high resolution, unreal engine, ultra realistic' A = pipe(A_ ,generator=A_ ,output_type='latent' ).images A = upscaler( prompt=A_ ,image=A_ ,num_inference_steps=20 ,guidance_scale=0 ,generator=A_ ,output_type='np' ,).images[0] A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' ) assert np.abs((expected_image - image).mean() ) < 5e-2 def _SCREAMING_SNAKE_CASE ( self : List[str] 
) -> Optional[Any]: A = torch.manual_seed(33 ) A = StableDiffusionLatentUpscalePipeline.from_pretrained( 'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.floataa ) upscaler.to('cuda' ) A = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas' A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' ) A = upscaler( prompt=A_ ,image=A_ ,num_inference_steps=20 ,guidance_scale=0 ,generator=A_ ,output_type='np' ,).images[0] A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' ) assert np.abs((expected_image - image).max() ) < 5e-2
"""simple docstring""" import argparse import struct import unittest class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : bytes ) -> None: A = data # Initialize hash values A = [ 0X6_A_0_9_E_6_6_7, 0XB_B_6_7_A_E_8_5, 0X3_C_6_E_F_3_7_2, 0XA_5_4_F_F_5_3_A, 0X5_1_0_E_5_2_7_F, 0X9_B_0_5_6_8_8_C, 0X1_F_8_3_D_9_A_B, 0X5_B_E_0_C_D_1_9, ] # Initialize round constants A = [ 0X4_2_8_A_2_F_9_8, 0X7_1_3_7_4_4_9_1, 0XB_5_C_0_F_B_C_F, 0XE_9_B_5_D_B_A_5, 0X3_9_5_6_C_2_5_B, 0X5_9_F_1_1_1_F_1, 0X9_2_3_F_8_2_A_4, 0XA_B_1_C_5_E_D_5, 0XD_8_0_7_A_A_9_8, 0X1_2_8_3_5_B_0_1, 0X2_4_3_1_8_5_B_E, 0X5_5_0_C_7_D_C_3, 0X7_2_B_E_5_D_7_4, 0X8_0_D_E_B_1_F_E, 0X9_B_D_C_0_6_A_7, 0XC_1_9_B_F_1_7_4, 0XE_4_9_B_6_9_C_1, 0XE_F_B_E_4_7_8_6, 0X0_F_C_1_9_D_C_6, 0X2_4_0_C_A_1_C_C, 0X2_D_E_9_2_C_6_F, 0X4_A_7_4_8_4_A_A, 0X5_C_B_0_A_9_D_C, 0X7_6_F_9_8_8_D_A, 0X9_8_3_E_5_1_5_2, 0XA_8_3_1_C_6_6_D, 0XB_0_0_3_2_7_C_8, 0XB_F_5_9_7_F_C_7, 0XC_6_E_0_0_B_F_3, 0XD_5_A_7_9_1_4_7, 0X0_6_C_A_6_3_5_1, 0X1_4_2_9_2_9_6_7, 0X2_7_B_7_0_A_8_5, 0X2_E_1_B_2_1_3_8, 0X4_D_2_C_6_D_F_C, 0X5_3_3_8_0_D_1_3, 0X6_5_0_A_7_3_5_4, 0X7_6_6_A_0_A_B_B, 0X8_1_C_2_C_9_2_E, 0X9_2_7_2_2_C_8_5, 0XA_2_B_F_E_8_A_1, 0XA_8_1_A_6_6_4_B, 0XC_2_4_B_8_B_7_0, 0XC_7_6_C_5_1_A_3, 0XD_1_9_2_E_8_1_9, 0XD_6_9_9_0_6_2_4, 0XF_4_0_E_3_5_8_5, 0X1_0_6_A_A_0_7_0, 0X1_9_A_4_C_1_1_6, 0X1_E_3_7_6_C_0_8, 0X2_7_4_8_7_7_4_C, 0X3_4_B_0_B_C_B_5, 0X3_9_1_C_0_C_B_3, 0X4_E_D_8_A_A_4_A, 0X5_B_9_C_C_A_4_F, 0X6_8_2_E_6_F_F_3, 0X7_4_8_F_8_2_E_E, 0X7_8_A_5_6_3_6_F, 0X8_4_C_8_7_8_1_4, 0X8_C_C_7_0_2_0_8, 0X9_0_B_E_F_F_F_A, 0XA_4_5_0_6_C_E_B, 0XB_E_F_9_A_3_F_7, 0XC_6_7_1_7_8_F_2, ] A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes: A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64)) A = struct.pack('>Q' ,(len(A_ ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None: # Convert into blocks of 64 bytes A = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A = list(struct.unpack('>16L' ,A_ ) ) # add 48 0-ed integers words += [0] * 48 A , A , A , A , A , A , A , A = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array A = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) A = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X1_0_0_0_0_0_0_0_0 # Compression A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 ) A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g) A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X1_0_0_0_0_0_0_0_0 A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 ) A = (a & b) ^ (a & c) ^ (b & c) A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0 A , A , A , A , A , A , A , A = ( g, f, e, ((d + tempa) % 0X1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0), ) A = [a, b, c, d, e, f, g, h] # Modify final values A = [ ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int: return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class lowerCAmelCase_ ( unittest.TestCase ): '''simple 
docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None: import hashlib A = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() ) def _snake_case ( ): import doctest doctest.testmod() A = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) A = parser.parse_args() A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: A = f.read() else: A = bytes(snake_case__ , 'utf-8' ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
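# Note on the implementation above: each 64-byte block is expanded to 64
# 32-bit words, then run through 64 rounds of the SHA-256 compression
# function; every addition is reduced mod 2**32, and `ror` is a 32-bit
# right rotation.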
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : list[int] ) -> None: A = len(A_ ) A = [0] * len_array if len_array > 0: A = array[0] for i in range(1 ,A_ ): A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool: A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(A_ ) return False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''DeiTFeatureExtractor'''] _lowercase = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" def _snake_case ( snake_case__ : float , snake_case__ : float ): if mass < 0: raise ValueError('The mass of a body cannot be negative' ) return 0.5 * mass * abs(snake_case__ ) * abs(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
"""simple docstring""" from __future__ import annotations import requests def _snake_case ( snake_case__ : str ): A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(snake_case__ ).json() def _snake_case ( snake_case__ : int = 10 ): A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' A = requests.get(snake_case__ ).json()[:max_stories] return [get_hackernews_story(snake_case__ ) for story_id in story_ids] def _snake_case ( snake_case__ : int = 10 ): A = hackernews_top_stories(snake_case__ ) return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = 1 @register_to_config def __init__( self : str ,A_ : Optional[Any]=2000 ,A_ : Union[str, Any]=0.1 ,A_ : Union[str, Any]=20 ,A_ : int=1e-3 ) -> Dict: A = None A = None A = None def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, torch.device] = None ) -> Optional[int]: A = torch.linspace(1 ,self.config.sampling_eps ,A_ ,device=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Tuple=None ) -> Tuple: if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score A = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) A = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) A = std.flatten() while len(std.shape ) < len(score.shape ): A = std.unsqueeze(-1 ) A = -score / std # compute A = -1.0 / len(self.timesteps ) A = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) A = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): A = beta_t.unsqueeze(-1 ) A = -0.5 * beta_t * x A = torch.sqrt(A_ ) A = drift - diffusion**2 * score A = x + drift * dt # add noise A = randn_tensor(x.shape ,layout=x.layout ,generator=A_ ,device=x.device ,dtype=x.dtype ) A = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self : Tuple ) -> str: return self.config.num_train_timesteps
"""simple docstring""" from string import ascii_uppercase _lowercase = {char: i for i, char in enumerate(ascii_uppercase)} _lowercase = dict(enumerate(ascii_uppercase)) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = len(snake_case__ ) A = 0 while True: if x == i: A = 0 if len(snake_case__ ) == len(snake_case__ ): break key += key[i] i += 1 return key def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in message: if letter == " ": cipher_text += " " else: A = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: A = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _snake_case ( ): A = 'THE GERMAN ATTACK' A = 'SECRET' A = generate_key(snake_case__ , snake_case__ ) A = cipher_text(snake_case__ , snake_case__ ) print(F'Encrypted Text = {s}' ) print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" def _snake_case ( snake_case__ : int ): A = int(snake_case__ ) if decimal in (0, 1): # Exit cases for the recursion return str(snake_case__ ) A , A = divmod(snake_case__ , 2 ) return binary_recursive(snake_case__ ) + str(snake_case__ ) def _snake_case ( snake_case__ : str ): A = str(snake_case__ ).strip() if not number: raise ValueError('No input value was provided' ) A = '-' if number.startswith('-' ) else '' A = number.lstrip('-' ) if not number.isnumeric(): raise ValueError('Input value is not an integer' ) return F'{negative}0b{binary_recursive(int(snake_case__ ) )}' if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): A = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = 'sgugger/tiny-distilbert-classification' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) # set architectures equal to `None` A = None A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ 
,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) benchmark.run() self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: A = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(A_ : Optional[int] ): self.assertTrue(hasattr(A_ ,'sequential' ) ) self.assertTrue(hasattr(A_ ,'cumulative' ) ) self.assertTrue(hasattr(A_ ,'current' ) ) self.assertTrue(hasattr(A_ ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ 
,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() )
"""simple docstring""" from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = '''''' _lowerCamelCase: int = '''hf-legacy''' # "hf://"" is reserved for hffs def __init__( self : List[str] ,A_ : Optional[DatasetInfo] = None ,A_ : Optional[str] = None ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(self ,**A_ ) A = repo_info A = token A = None def _SCREAMING_SNAKE_CASE ( self : int ) -> str: if self.dir_cache is None: A = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes A = { 'name': hf_file.rfilename, 'size': None, 'type': 'file', } self.dir_cache.update( { str(A_ ): {'name': str(A_ ), 'size': None, 'type': 'directory'} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : str = "rb" ,**A_ : Dict ,) -> Union[str, Any]: if not isinstance(self.repo_info ,A_ ): raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' ) A = hf_hub_url(self.repo_info.id ,A_ ,revision=self.repo_info.sha ) return fsspec.open( A_ ,mode=A_ ,headers=get_authentication_headers_for_url(A_ ,use_auth_token=self.token ) ,client_kwargs={'trust_env': True} ,).open() def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : str ,**A_ : int ) -> Tuple: self._get_dirs() A = self._strip_protocol(A_ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : str=False ,**A_ : Tuple ) -> str: self._get_dirs() A = PurePosixPath(path.strip('/' ) ) A = {} for p, f in self.dir_cache.items(): A = PurePosixPath(p.strip('/' ) ) A = p.parent if root == path: A = f A = list(paths.values() ) if detail: return out else: return sorted(f['name'] for f in out )
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: A , A , A = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[int] ) -> Dict: return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return self.major, self.minor, self.patch def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]: if isinstance(A_ ,A_ ): return Version(A_ ) elif isinstance(A_ ,A_ ): return other raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' ) def __eq__( self : List[Any] ,A_ : Dict ) -> Any: try: A = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple: A = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : Union[str, Any] ) -> Union[str, Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]: A = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.version_str def _snake_case ( snake_case__ : List[str] ): A = _VERSION_REG.match(snake_case__ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def _snake_case ( snake_case__ : str ): return ".".join(str(snake_case__ ) for v in version_tuple )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''], '''convert_funnel_original_tf_checkpoint_to_pytorch''': [], '''tokenization_funnel''': ['''FunnelTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''FunnelTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FunnelBaseModel''', '''FunnelForMaskedLM''', '''FunnelForMultipleChoice''', '''FunnelForPreTraining''', '''FunnelForQuestionAnswering''', '''FunnelForSequenceClassification''', '''FunnelForTokenClassification''', '''FunnelModel''', '''FunnelPreTrainedModel''', '''load_tf_weights_in_funnel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFFunnelBaseModel''', '''TFFunnelForMaskedLM''', '''TFFunnelForMultipleChoice''', '''TFFunnelForPreTraining''', '''TFFunnelForQuestionAnswering''', '''TFFunnelForSequenceClassification''', '''TFFunnelForTokenClassification''', '''TFFunnelModel''', '''TFFunnelPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml _lowercase = NewType('''DataClass''', Any) _lowercase = NewType('''DataClassType''', Any) def _snake_case ( snake_case__ : Tuple ): if isinstance(snake_case__ , snake_case__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def _snake_case ( snake_case__ : list ): A = {str(snake_case__ ): choice for choice in choices} return lambda snake_case__ : str_to_choice.get(snake_case__ , snake_case__ ) def _snake_case ( *, snake_case__ : Union[str, List[str]] = None , snake_case__ : str = None , snake_case__ : Any = dataclasses.MISSING , snake_case__ : Callable[[], Any] = dataclasses.MISSING , snake_case__ : dict = None , **snake_case__ : Any , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls A = {} if aliases is not None: A = aliases if help is not None: A = help return dataclasses.field(metadata=snake_case__ , default=snake_case__ , default_factory=snake_case__ , **snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Iterable[DataClassType] def __init__( self : List[str] ,A_ : Union[DataClassType, Iterable[DataClassType]] ,**A_ : Any ) -> Optional[int]: # To make the default appear when using --help if "formatter_class" not in kwargs: A = ArgumentDefaultsHelpFormatter super().__init__(**A_ ) if dataclasses.is_dataclass(A_ ): A = [dataclass_types] A = list(A_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ,A_ : dataclasses.Field ) -> Optional[Any]: A = F'--{field.name}' A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A_ ): raise RuntimeError( 'Unresolved type detected, which should have been done with the help of ' '`typing.get_type_hints` method by default' ) A = kwargs.pop('aliases' ,[] ) if isinstance(A_ ,A_ ): A = [aliases] A = getattr(field.type ,'__origin__' ,field.type ) if origin_type is Union or (hasattr(A_ ,'UnionType' ) and isinstance(A_ ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A_ ) not in field.type.__args__ ): raise ValueError( 'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because' ' the argument parser only supports one type per argument.' F' Problem encountered in field \'{field.name}\'.' 
) if type(A_ ) not in field.type.__args__: # filter `str` in Union A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] A = getattr(field.type ,'__origin__' ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) A = ( field.type.__args__[0] if isinstance(A_ ,field.type.__args__[1] ) else field.type.__args__[1] ) A = getattr(field.type ,'__origin__' ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) A = {} if origin_type is Literal or (isinstance(field.type ,A_ ) and issubclass(field.type ,A_ )): if origin_type is Literal: A = field.type.__args__ else: A = [x.value for x in field.type] A = make_choice_type_function(kwargs['choices'] ) if field.default is not dataclasses.MISSING: A = field.default else: A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument A = copy(A_ ) # Hack because type=bool in argparse does not behave as we want. A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way A = default # This tells argparse we accept 0 or 1 value after --field_name A = '?' # This is the value that will get picked if we do --field_name (without value) A = True elif isclass(A_ ) and issubclass(A_ ,A_ ): A = field.type.__args__[0] A = '+' if field.default_factory is not dataclasses.MISSING: A = field.default_factory() elif field.default is dataclasses.MISSING: A = True else: A = field.type if field.default is not dataclasses.MISSING: A = field.default elif field.default_factory is not dataclasses.MISSING: A = field.default_factory() else: A = True parser.add_argument(A_ ,*A_ ,**A_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): A = False parser.add_argument(F'--no_{field.name}' ,action='store_false' ,dest=field.name ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DataClassType ) -> List[Any]: if hasattr(A_ ,'_argument_group_name' ): A = self.add_argument_group(dtype._argument_group_name ) else: A = self try: A = get_type_hints(A_ ) except NameError: raise RuntimeError( F'Type resolution failed for {dtype}. Try declaring the class in global scope or ' 'removing line of `from __future__ import annotations` which opts in Postponed ' 'Evaluation of Annotations (PEP 563)' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A_ ): A = '.'.join(map(A_ ,sys.version_info[:3] ) ) raise RuntimeError( F'Type resolution failed for {dtype} on Python {python_version}. 
Try removing ' 'line of `from __future__ import annotations` which opts in union types as ' '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ' 'support Python versions that lower than 3.10, you need to use ' '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ' '`X | None`.' ) from ex raise for field in dataclasses.fields(A_ ): if not field.init: continue A = type_hints[field.name] self._parse_dataclass_field(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any=None ,A_ : int=False ,A_ : Any=True ,A_ : List[str]=None ,A_ : Union[str, Any]=None ,) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): A = [] if args_filename: args_files.append(Path(A_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values A = ArgumentParser() args_file_parser.add_argument(A_ ,type=A_ ,action='append' ) # Use only remaining args for further parsing (remove the args_file_flag) A , A = args_file_parser.parse_known_args(args=A_ ) A = vars(A_ ).get(args_file_flag.lstrip('-' ) ,A_ ) if cmd_args_file_paths: args_files.extend([Path(A_ ) for p in cmd_args_file_paths] ) A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last A = file_args + args if args is not None else file_args + sys.argv[1:] A , A = self.parse_known_args(args=A_ ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in vars(A_ ).items() if k in keys} for k in keys: delattr(A_ ,A_ ) A = dtype(**A_ ) outputs.append(A_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' ) return (*outputs,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict[str, Any] ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = set(args.keys() ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) A = dtype(**A_ ) outputs.append(A_ ) if not allow_extra_keys and unused_keys: raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(A_ )}' ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: with open(Path(A_ ) ,encoding='utf-8' ) as open_json_file: A = json.loads(open_json_file.read() ) A = self.parse_dict(A_ ,allow_extra_keys=A_ ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = self.parse_dict(yaml.safe_load(Path(A_ ).read_text() ) ,allow_extra_keys=A_ ) return tuple(A_ )
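# Usage sketch of the dataclass-driven parser defined above; upstream
# (transformers.HfArgumentParser) the entry point is parse_args_into_dataclasses,
# and the dataclass below is an assumed example, not part of this module:
#
#     @dataclasses.dataclass
#     class TrainArgs:
#         learning_rate: float = 5e-5
#         fp16: bool = False
#
#     parser = HfArgumentParser(TrainArgs)
#     (train_args,) = parser.parse_args_into_dataclasses()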
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : Dict ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase = 16 _lowercase = 32 def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ): A = AutoTokenizer.from_pretrained(snake_case__ ) A = load_dataset('glue' , 'mrpc' ) def tokenize_function(snake_case__ : Dict ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. A = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[int] ): # Initialize accelerator A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config['lr'] A = int(config['num_epochs'] ) A = int(config['seed'] ) A = int(config['batch_size'] ) A = args.model_name_or_path set_seed(snake_case__ ) A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer A = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A = optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: A = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: A = 1 A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in 
the same order we gave them to the # prepare method. A , A , A , A , A = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over A = 0 # We also need to keep track of the stating epoch so files are named properly A = 0 # Now we train the model A = evaluate.load('glue' , 'mrpc' ) A = 0 A = {} for epoch in range(snake_case__ , snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): A = model(**snake_case__ ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A = 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A , A = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case__ ) - 1: A = predictions[: len(eval_dataloader.dataset ) - samples_seen] A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , snake_case__ ) A = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: A = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(snake_case__ , snake_case__ ) def _snake_case ( ): A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , ) parser.add_argument( '--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , ) A = parser.parse_args() A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
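# Gradient-accumulation sketch: with gradient_accumulation_steps = 4 the
# training loop above backpropagates loss / 4 on every micro-batch and only
# calls optimizer.step() (and the scheduler) once every 4 micro-batches,
# emulating a 4x larger effective batch size.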
"""simple docstring""" import string from math import logaa def _snake_case ( snake_case__ : str , snake_case__ : str ): A = document.translate( str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' ) A = document_without_punctuation.split(' ' ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = corpus.lower().translate( str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with '' A = corpus_without_punctuation.split('\n' ) A = term.lower() return (len([doc for doc in docs if term in doc] ), len(snake_case__ )) def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : Any=False ): if smoothing: if n == 0: raise ValueError('log10(0) is undefined.' ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError('df must be > 0' ) elif n == 0: raise ValueError('log10(0) is undefined.' ) return round(logaa(n / df ) , 3 ) def _snake_case ( snake_case__ : int , snake_case__ : int ): return round(tf * idf , 3 )
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : int ,A_ : Dict ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : 
Union[str, Any] ,A_ : int ,A_ : str ,) -> Any: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ,A_ : str ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : str ,A_ : Any ,A_ : str ,A_ : Dict ,) -> Dict: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Any ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,) -> int: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Optional[int] ,A_ : Any ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ,) -> List[Any]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ,A_ : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> Optional[int]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int] ,) -> List[str]: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : List[Any] ,) -> List[str]: A = 
self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[int] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ) -> Any: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[int] ,A_ : List[Any]=False ) -> int: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Any ,A_ : str ,A_ : Tuple ,A_ : Any ,A_ : Any=False ,A_ : Any=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ,A_ : int ,A_ : Any ,A_ : str=False ,A_ : Any=1 ) -> Tuple: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
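
# A standalone sketch of the generation path the slow test above exercises
# (an assumption: this obfuscated dump mirrors the public `transformers` exports
# `XLMTokenizer` / `XLMWithLMHeadModel`; weights download on first use):
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
input_ids = tokenizer('the president', return_tensors='pt').input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
print(tokenizer.decode(output_ids[0]))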
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowercase = logging.get_logger(__name__) _lowercase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } _lowercase = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } _lowercase = {'''facebook/blenderbot-3B''': 1_28} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _snake_case ( ): A = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) A = bs[:] A = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case__ ) cs.append(2**8 + n ) n += 1 A = [chr(snake_case__ ) for n in cs] return dict(zip(snake_case__ , snake_case__ ) ) def _snake_case ( snake_case__ : List[Any] ): A = set() A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) A = char return pairs class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = VOCAB_FILES_NAMES _lowerCamelCase: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Dict = ['''input_ids''', '''attention_mask'''] def __init__( self : Any ,A_ : List[str] ,A_ : int ,A_ : int="replace" ,A_ : List[str]="<s>" ,A_ : List[Any]="</s>" ,A_ : Optional[Any]="</s>" ,A_ : List[str]="<s>" ,A_ : int="<unk>" ,A_ : str="<pad>" ,A_ : Union[str, Any]="<mask>" ,A_ : int=False ,**A_ : str ,) -> List[str]: A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else bos_token A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else eos_token A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else sep_token A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else cls_token A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else unk_token A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token super().__init__( errors=A_ ,bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,add_prefix_space=A_ ,**A_ ,) with open(A_ ,encoding='utf-8' ) as vocab_handle: A = json.load(A_ ) A = {v: k for k, v in self.encoder.items()} A = errors # how to handle errors in decoding A = bytes_to_unicode() A = {v: k for k, v in self.byte_encoder.items()} with open(A_ ,encoding='utf-8' ) as merges_handle: A = merges_handle.read().split('\n' )[1:-1] A = [tuple(merge.split() ) for merge in bpe_merges] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = {} A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ) -> Union[str, Any]: if token in self.cache: return self.cache[token] A = tuple(A_ ) A = get_pairs(A_ ) if not pairs: return token while True: A = min(A_ ,key=lambda A_ : self.bpe_ranks.get(A_ ,float('inf' ) ) ) if bigram not in self.bpe_ranks: break A , A = bigram A = [] A = 0 while i < len(A_ ): try: A = word.index(A_ ,A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A = j if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A = tuple(A_ ) A = new_word if len(A_ ) == 1: break else: A = get_pairs(A_ ) A = ' '.join(A_ ) A = word return word def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ) -> Tuple: A = [] for token in re.findall(self.pat ,A_ ): A = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Dict ) -> Union[str, Any]: return self.encoder.get(A_ ,self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> Dict: return self.decoder.get(A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[Any] ) -> int: A = ''.join(A_ ) A = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors ) return text def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=A_ ,ensure_ascii=A_ ) + '\n' ) A = 0 with open(A_ ,'w' ,encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda A_ : kv[1] ): if index != token_index: 
logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!' ) A = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[int] ,A_ : Optional[Any]=False ,**A_ : Tuple ) -> List[Any]: A = kwargs.pop('add_prefix_space' ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()): A = ' ' + text return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> str: return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : "Conversation" ) -> List[int]: A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(A_ ) A = ' '.join(A_ ) A = self.encode(A_ ) if len(A_ ) > self.model_max_length: A = input_ids[-self.model_max_length :] logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
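
# A minimal usage sketch for the byte-level BPE tokenizer above (an assumption:
# this obfuscated dump mirrors the public `transformers` export `BlenderbotTokenizer`):
from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
ids = tokenizer('Hello world').input_ids  # build_inputs_with_special_tokens appends </s>
print(tokenizer.decode(ids))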
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowercase = logging.get_logger(__name__) @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : int ,**A_ : Any ) -> Any: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A = deprecated_arg[3:] A = not kwargs.pop(A_ ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) A = kwargs.pop('tpu_name' ,self.tpu_name ) A = kwargs.pop('device_idx' ,self.device_idx ) A = kwargs.pop('eager_mode' ,self.eager_mode ) A = kwargs.pop('use_xla' ,self.use_xla ) super().__init__(**A_ ) _lowerCamelCase: str = field( default=_lowercase , metadata={'''help''': '''Name of TPU'''} , ) _lowerCamelCase: int = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Benchmark models in eager model.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self ,['tf'] ) A = None if self.tpu: try: if self.tpu_name: A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: A = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: A = None return tpu @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self ,['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) A = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' ) A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] ,'GPU' ) # disable GPU A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' ) return strategy @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: requires_backends(self ,['tf'] ) return self._setup_tpu is not None @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> "tf.distribute.Strategy": requires_backends(self ,['tf'] ) return self._setup_strategy @property def _SCREAMING_SNAKE_CASE ( self : int ) -> str: requires_backends(self ,['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: requires_backends(self ,['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _SCREAMING_SNAKE_CASE ( self : str ) -> bool: return self.n_gpu > 0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''], '''tokenization_deberta''': ['''DebertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''DebertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DebertaForMaskedLM''', '''DebertaForQuestionAnswering''', '''DebertaForSequenceClassification''', '''DebertaForTokenClassification''', '''DebertaModel''', '''DebertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDebertaForMaskedLM''', '''TFDebertaForQuestionAnswering''', '''TFDebertaForSequenceClassification''', '''TFDebertaForTokenClassification''', '''TFDebertaModel''', '''TFDebertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _lowercase = logging.get_logger(__name__) _lowercase = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = '''dpt''' def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]: super().__init__(**A_ ) A = hidden_size A = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) A = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): logger.info('Initializing the config with a `BiT` backbone.' ) A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): A = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) A = backbone_featmap_shape A = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' ) else: A = None A = None A = [] A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) A = readout_type A = reassemble_factors A = neck_hidden_sizes A = fusion_hidden_size A = head_in_index A = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A = use_auxiliary_head A = auxiliary_loss_weight A = semantic_loss_ignore_index A = semantic_classifier_dropout def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: A = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A = self.backbone_config.to_dict() A = self.__class__.model_type return output
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''microsoft/unispeech-sat-base-100h-libri-ft''': ( '''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json''' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''unispeech-sat''' def __init__( self : int ,A_ : Optional[int]=32 ,A_ : Optional[int]=768 ,A_ : Union[str, Any]=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : Union[str, Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Any=0.1 ,A_ : Optional[int]=0.0 ,A_ : Union[str, Any]=0.0 ,A_ : str=0.1 ,A_ : Union[str, Any]=0.1 ,A_ : int=0.02 ,A_ : int=1e-5 ,A_ : int="group" ,A_ : List[str]="gelu" ,A_ : Dict=(512, 512, 512, 512, 512, 512, 512) ,A_ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) ,A_ : Any=(10, 3, 3, 3, 3, 2, 2) ,A_ : Optional[Any]=False ,A_ : str=128 ,A_ : Union[str, Any]=16 ,A_ : List[str]=False ,A_ : str=True ,A_ : Dict=0.05 ,A_ : Optional[Any]=10 ,A_ : Optional[int]=2 ,A_ : List[Any]=0.0 ,A_ : Union[str, Any]=10 ,A_ : str=0 ,A_ : str=320 ,A_ : Any=2 ,A_ : Dict=0.1 ,A_ : Optional[Any]=100 ,A_ : Optional[int]=256 ,A_ : Union[str, Any]=256 ,A_ : Tuple=0.1 ,A_ : int="mean" ,A_ : List[Any]=False ,A_ : Tuple=False ,A_ : Dict=256 ,A_ : List[str]=(512, 512, 512, 512, 1500) ,A_ : Optional[Any]=(5, 3, 3, 1, 1) ,A_ : Optional[Any]=(1, 2, 3, 1, 1) ,A_ : List[Any]=512 ,A_ : Any=0 ,A_ : Tuple=1 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=504 ,**A_ : Dict ,) -> List[str]: super().__init__(**A_ ,pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ) A = hidden_size A = feat_extract_norm A = feat_extract_activation A = list(A_ ) A = list(A_ ) A = list(A_ ) A = conv_bias A = num_conv_pos_embeddings A = num_conv_pos_embedding_groups A = len(self.conv_dim ) A = num_hidden_layers A = intermediate_size A = hidden_act A = num_attention_heads A = hidden_dropout A = attention_dropout A = activation_dropout A = feat_proj_dropout A = final_dropout A = layerdrop A = layer_norm_eps A = initializer_range A = vocab_size A = num_clusters A = do_stable_layer_norm A = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A = apply_spec_augment A = mask_time_prob A = mask_time_length A = mask_time_min_masks A = mask_feature_prob A = mask_feature_length A = mask_feature_min_masks # parameters for pretraining with codevector quantized representations A = num_codevectors_per_group A = num_codevector_groups A = contrastive_logits_temperature A = feat_quantizer_dropout A = num_negatives A = codevector_dim A = proj_codevector_dim A = diversity_loss_weight # ctc loss A = ctc_loss_reduction A = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. 
        A = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        A = list(A_ )
        A = list(A_ )
        A = list(A_ )
        A = xvector_output_dim

    @property
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        return functools.reduce(operator.mul ,self.conv_stride ,1 )
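
# The final property above multiplies the conv strides to get the feature encoder's
# total downsampling factor; a standalone check with the default strides from the
# __init__ signature (an assumption, since the dump obfuscates the property name):
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 samples per frame, i.e. 20 ms at 16 kHz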
"""simple docstring""" from __future__ import annotations import math _lowercase = '''2020.9.26''' _lowercase = '''xcodz-dot, cclaus, dhruvmanila''' def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ): A = F'Input values must either be float or int: {list(locals().values() )}' raise TypeError(snake_case__ ) A = ((x * distance) / (z + distance)) * scale A = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ): if not isinstance(snake_case__ , snake_case__ ): raise TypeError('Axis must be a str' ) A = locals() del input_variables["axis"] if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ): A = ( 'Input values except axis must either be float or int: ' F'{list(input_variables.values() )}' ) raise TypeError(snake_case__ ) A = (angle % 360) / 450 * 180 / math.pi if axis == "z": A = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ ) A = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = z elif axis == "x": A = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ ) A = x elif axis == "y": A = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = y else: raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""") print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
"""simple docstring""" from __future__ import annotations import math def _snake_case ( snake_case__ : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( snake_case__ : int ): A = str(snake_case__ ) A = [n] for i in range(1 , len(snake_case__ ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def _snake_case ( snake_case__ : int ): if len(str(snake_case__ ) ) > 3: if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ): return False return True def _snake_case ( snake_case__ : int = 11 ): A = [] A = 13 while len(snake_case__ ) != count: if validate(snake_case__ ): A = list_truncated_nums(snake_case__ ) if all(is_prime(snake_case__ ) for i in list_nums ): list_truncated_primes.append(snake_case__ ) num += 2 return list_truncated_primes def _snake_case ( ): return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(F"""{sum(compute_truncated_primes(11)) = }""")
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ,A_ : int ) -> Union[str, Any]: A = n A = [None] * self.n A = 0 # index of the first element A = 0 A = 0 def __len__( self : int ) -> int: return self.size def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.size == 0 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: return False if self.is_empty() else self.array[self.front] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int: if self.size >= self.n: raise Exception('QUEUE IS FULL' ) A = data A = (self.rear + 1) % self.n self.size += 1 return self def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: if self.size == 0: raise Exception('UNDERFLOW' ) A = self.array[self.front] A = None A = (self.front + 1) % self.n self.size -= 1 return temp
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : str ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> None: warnings.warn( 'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DonutImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
"""simple docstring""" import math class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : Optional[int]=0 ) -> List[str]: # a graph with Node 0,1,...,N-1 A = n A = [ [math.inf for j in range(0 ,A_ )] for i in range(0 ,A_ ) ] # adjacency matrix for weight A = [ [math.inf for j in range(0 ,A_ )] for i in range(0 ,A_ ) ] # dp[i][j] stores minimum distance from i to j def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[str] ,A_ : Any ,A_ : Any ) -> Optional[Any]: A = w def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: for k in range(0 ,self.n ): for i in range(0 ,self.n ): for j in range(0 ,self.n ): A = min(self.dp[i][j] ,self.dp[i][k] + self.dp[k][j] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> Optional[Any]: return self.dp[u][v] if __name__ == "__main__": _lowercase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''gpt_bigcode''' _lowerCamelCase: List[Any] = ['''past_key_values'''] _lowerCamelCase: int = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Optional[int] ,A_ : Dict=5_0257 ,A_ : Union[str, Any]=1024 ,A_ : str=768 ,A_ : Any=12 ,A_ : Any=12 ,A_ : Optional[int]=None ,A_ : Any="gelu_pytorch_tanh" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : List[str]=0.1 ,A_ : Tuple=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[str]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=5_0256 ,A_ : Union[str, Any]=5_0256 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : Dict=True ,**A_ : Union[str, Any] ,) -> Union[str, Any]: A = vocab_size A = n_positions A = n_embd A = n_layer A = n_head A = n_inner A = activation_function A = resid_pdrop A = embd_pdrop A = attn_pdrop A = layer_norm_epsilon A = initializer_range A = scale_attn_weights A = use_cache A = attention_softmax_in_fpaa A = scale_attention_softmax_in_fpaa A = multi_query A = bos_token_id A = eos_token_id super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: UNetaDModel _lowerCamelCase: KarrasVeScheduler def __init__( self : List[Any] ,A_ : UNetaDModel ,A_ : KarrasVeScheduler ) -> Dict: super().__init__() self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Optional[Any] ,A_ : int = 1 ,A_ : int = 50 ,A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]: A = self.unet.config.sample_size A = (batch_size, 3, img_size, img_size) A = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) A = randn_tensor(A_ ,generator=A_ ,device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper A = self.scheduler.schedule[t] A = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat A , A = self.scheduler.add_noise_to_input(A_ ,A_ ,generator=A_ ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. A = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev A = self.scheduler.step(A_ ,A_ ,A_ ,A_ ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. A = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample A = self.scheduler.step_correct( A_ ,A_ ,A_ ,A_ ,step_output.prev_sample ,step_output['derivative'] ,) A = step_output.prev_sample A = (sample / 2 + 0.5).clamp(0 ,1 ) A = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
"""simple docstring""" import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _lowercase = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) _lowercase = '''sshleifer/student_marian_en_ro_6_1''' _lowercase = '''sshleifer/tiny-mbart''' @require_torch class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple: A = self.run_trainer( eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,) A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history if not do_eval: return A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] ,A_ ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: self.run_seqaseq_quick() @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : int ) -> int: self.run_seqaseq_quick(distributed=A_ ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: self.run_seqaseq_quick( distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ ) @require_apex @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) @parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } A = experiments[experiment_id] A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} A = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] ) A = len(re.findall(A_ ,cl.err ) ) self.assertEqual(A_ ,data['n_matches'] ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = self.run_trainer( eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,) # Check metrics A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] A = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] ,A_ ) # test if do_predict saves generations and metrics A = os.listdir(A_ ) A = {os.path.basename(A_ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: from transformers.training_args import OptimizerNames def train_and_return_metrics(A_ : str ) -> Tuple[int, float]: A = '--skip_memory_metrics 0' A = self.run_trainer( max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,) # Check metrics A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) A = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A = 
train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A = gpu_peak_mem_orig + gpu_alloc_mem_orig A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and' F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,) self.assertGreater( A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and' F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,) self.assertEqual( A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict: A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' A = self.get_auto_remove_tmp_dir() A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split() A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split() A = '\n --do_predict\n '.split() A = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'--optim {optim}'.split() if extra_args_str is not None: args += 
extra_args_str.split() if distributed: if n_gpus_to_use is None: A = get_gpu_count() A = get_torch_dist_unique_port() A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split() A = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A_ ,env=self.get_env() ) else: A = ['run_translation.py'] + args with patch.object(A_ ,'argv' ,A_ ): main() return output_dir
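# The distributed branch above is equivalent to launching the example script by hand
# (a sketch; the port and GPU count are resolved at runtime by the test helpers):
#
#     python -m torch.distributed.run --nproc_per_node=<n_gpus> --master_port=<port> \
#         examples/pytorch/translation/run_translation.py <training args...>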
"""simple docstring""" import os import sys import unittest _lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _lowercase = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = find_backend(' if not is_torch_available():' ) self.assertEqual(A_ ,'torch' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") A = find_backend(' if not (is_torch_available() and is_transformers_available()):' ) self.assertEqual(A_ ,'torch_and_transformers' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") A = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' ) self.assertEqual(A_ ,'torch_and_transformers_and_onnx' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: A = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' ,A_ ) self.assertIn('torch_and_transformers' ,A_ ) self.assertIn('flax_and_transformers' ,A_ ) self.assertIn('torch_and_transformers_and_onnx' ,A_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' ,objects['torch'] ) self.assertIn('FlaxUNet2DConditionModel' ,objects['flax'] ) self.assertIn('StableDiffusionPipeline' ,objects['torch_and_transformers'] ) self.assertIn('FlaxStableDiffusionPipeline' ,objects['flax_and_transformers'] ) self.assertIn('LMSDiscreteScheduler' ,objects['torch_and_scipy'] ) self.assertIn('OnnxStableDiffusionPipeline' ,objects['torch_and_transformers_and_onnx'] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = create_dummy_object('CONSTANT' ,'\'torch\'' ) self.assertEqual(A_ ,'\nCONSTANT = None\n' ) A = create_dummy_object('function' ,'\'torch\'' ) self.assertEqual( A_ ,'\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) A = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' A = create_dummy_object('FakeClass' ,'\'torch\'' ) self.assertEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' A = 
create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] ,A_ )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''deit''' def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = encoder_stride class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: int = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float: return 1e-4
"""simple docstring""" from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Collection[float] | None = None ) -> None: if components is None: A = [] A = list(A_ ) def __len__( self : Union[str, Any] ) -> int: return len(self.__components ) def __str__( self : Dict ) -> str: return "(" + ",".join(map(A_ ,self.__components ) ) + ")" def __add__( self : Optional[Any] ,A_ : Vector ) -> Vector: A = len(self ) if size == len(A_ ): A = [self.__components[i] + other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: raise Exception('must have the same size' ) def __sub__( self : Optional[int] ,A_ : Vector ) -> Vector: A = len(self ) if size == len(A_ ): A = [self.__components[i] - other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: # error case raise Exception('must have the same size' ) @overload def __mul__( self : str ,A_ : float ) -> Vector: ... @overload def __mul__( self : Any ,A_ : Vector ) -> float: ... def __mul__( self : Dict ,A_ : float | Vector ) -> float | Vector: if isinstance(A_ ,(float, int) ): A = [c * other for c in self.__components] return Vector(A_ ) elif isinstance(A_ ,A_ ) and len(self ) == len(A_ ): A = len(self ) A = [self.__components[i] * other.component(A_ ) for i in range(A_ )] return sum(A_ ) else: # error case raise Exception('invalid operand!' ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Vector: return Vector(self.__components ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> float: if isinstance(A_ ,A_ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('index out of range' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ,A_ : float ) -> None: assert -len(self.__components ) <= pos < len(self.__components ) A = value def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> float: if len(self.__components ) == 0: raise Exception('Vector is empty' ) A = [c**2 for c in self.__components] return math.sqrt(sum(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Vector ,A_ : bool = False ) -> float: A = self * other A = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _snake_case ( snake_case__ : int ): assert isinstance(snake_case__ , snake_case__ ) return Vector([0] * dimension ) def _snake_case ( snake_case__ : int , snake_case__ : int ): assert isinstance(snake_case__ , snake_case__ ) and (isinstance(snake_case__ , snake_case__ )) A = [0] * dimension A = 1 return Vector(snake_case__ ) def _snake_case ( snake_case__ : float , snake_case__ : Vector , snake_case__ : Vector ): assert ( isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ ) and (isinstance(snake_case__ , (int, float) )) ) return x * scalar + y def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : int ): random.seed(snake_case__ ) A = [random.randint(snake_case__ , snake_case__ ) for _ in range(snake_case__ )] return Vector(snake_case__ ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any ,A_ : list[list[float]] ,A_ : int ,A_ : int ) -> None: A = matrix A = w A = h def __str__( self : int ) -> str: A = '' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" 
return ans def __add__( self : Tuple ,A_ : Matrix ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): A = [] for i in range(self.__height ): A = [ self.__matrix[i][j] + other.component(A_ ,A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ ,self.__width ,self.__height ) else: raise Exception('matrix must have the same dimension!' ) def __sub__( self : Union[str, Any] ,A_ : Matrix ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): A = [] for i in range(self.__height ): A = [ self.__matrix[i][j] - other.component(A_ ,A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ ,self.__width ,self.__height ) else: raise Exception('matrices must have the same dimension!' ) @overload def __mul__( self : Dict ,A_ : float ) -> Matrix: ... @overload def __mul__( self : Optional[Any] ,A_ : Vector ) -> Vector: ... def __mul__( self : List[Any] ,A_ : float | Vector ) -> Vector | Matrix: if isinstance(A_ ,A_ ): # matrix-vector if len(A_ ) == self.__width: A = zero_vector(self.__height ) for i in range(self.__height ): A = [ self.__matrix[i][j] * other.component(A_ ) for j in range(self.__width ) ] ans.change_component(A_ ,sum(A_ ) ) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!' ) elif isinstance(A_ ,(int, float) ): # matrix-scalar A = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(A_ ,self.__width ,self.__height ) return None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return self.__height def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return self.__width def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : int ) -> float: if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ,A_ : int ,A_ : float ) -> None: if 0 <= x < self.__height and 0 <= y < self.__width: A = value else: raise Exception('change_component: indices out of bounds' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : int ,A_ : int ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) A = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(A_ ) ): A = minor[i][:y] + minor[i][y + 1 :] return Matrix(A_ ,self.__width - 1 ,self.__height - 1 ).determinant() def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ,A_ : int ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(A_ ,A_ ) else: raise Exception('Indices out of bounds' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if self.__height < 1: raise Exception('Matrix has no element' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: A = [ self.__matrix[0][y] * self.cofactor(0 ,A_ ) for y in range(self.__width ) ] return sum(A_ ) def _snake_case ( snake_case__ : int ): A = [[0] * n for _ in range(snake_case__ )] return Matrix(snake_case__ , snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ): random.seed(snake_case__ ) A = [ 
[random.randint(snake_case__ , snake_case__ ) for _ in range(snake_case__ )]
        for _ in range(snake_case__ )
    ]
    return Matrix(snake_case__ , snake_case__ , snake_case__ )
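# A behavioral sketch of the vector class above (the dump obfuscates the class name;
# `Vector` below is the assumed original, shown for illustration only):
#
#     v = Vector([1.0, 2.0, 2.0])
#     w = Vector([1.0, 0.0, 0.0])
#     print(len(v))                # 3
#     print(v.euclidean_length())  # 3.0, since sqrt(1 + 4 + 4) = 3
#     print(v * w)                 # 1.0 -- the dot-product branch of __mul__
#     print(v + w)                 # (2.0,2.0,2.0)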
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ : Dict ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) A = [] for i in range(snake_case__ ): A = i / num_diffusion_timesteps A = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) ) return torch.tensor(snake_case__ , dtype=torch.floataa ) class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers] _lowerCamelCase: Optional[Any] = 2 @register_to_config def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]: if trained_betas is not None: A = torch.tensor(A_ ,dtype=torch.floataa ) elif beta_schedule == "linear": A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. A = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' ) elif beta_schedule == "exp": A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) A = 1.0 - self.betas A = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(A_ ,A_ ,A_ ) A = use_karras_sigmas def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple: if schedule_timesteps is None: A = self.timesteps A = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: A = 1 if len(A_ ) > 1 else 0 else: A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep A = self._index_counter[timestep_int] return indices[pos].item() @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor: A = self.index_for_timestep(A_ ) A = self.sigmas[step_index] A = sample / ((sigma**2 + 1) ** 0.5) return sample def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]: A = num_inference_steps A = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy() elif self.config.timestep_spacing == "leading": A = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": A = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ ) timesteps -= 1 else: raise ValueError( F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' 
) A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) A = np.log(A_ ) A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ ) if self.config.use_karras_sigmas: A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps ) A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] ) A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) A = torch.from_numpy(A_ ).to(device=A_ ) A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) A = torch.from_numpy(A_ ) A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(A_ ).startswith('mps' ): # mps does not support float64 A = timesteps.to(A_ ,dtype=torch.floataa ) else: A = timesteps.to(device=A_ ) # empty dt and derivative A = None A = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter A = defaultdict(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict: # get log sigma A = np.log(A_ ) # get distribution A = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) A = low_idx + 1 A = log_sigmas[low_idx] A = log_sigmas[high_idx] # interpolate sigmas A = (low - log_sigma) / (low - high) A = np.clip(A_ ,0 ,1 ) # transform interpolation to time range A = (1 - w) * low_idx + w * high_idx A = t.reshape(sigma.shape ) return t def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor: A = in_sigmas[-1].item() A = in_sigmas[0].item() A = 7.0 # 7.0 is the value used in the paper A = np.linspace(0 ,1 ,A_ ) A = sigma_min ** (1 / rho) A = sigma_max ** (1 / rho) A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.dt is None def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]: A = self.index_for_timestep(A_ ) # advance index counter by 1 A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: A = self.sigmas[step_index] A = self.sigmas[step_index + 1] else: # 2nd order / Heun's method A = self.sigmas[step_index - 1] A = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API A = 0 A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": A = sigma_hat if self.state_in_first_order else sigma_next A = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": A = sigma_hat if self.state_in_first_order else sigma_next A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": A = model_output else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.config.clip_sample: A = pred_original_sample.clamp( -self.config.clip_sample_range ,self.config.clip_sample_range ) if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order A = (sample - pred_original_sample) / sigma_hat # 3. delta timestep A = sigma_next - sigma_hat # store for 2nd order step A = derivative A = dt A = sample else: # 2. 2nd order / Heun's method A = (sample - pred_original_sample) / sigma_next A = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample A = self.dt A = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" A = None A = None A = None A = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A_ ): # mps does not support float64 A = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) A = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: A = self.timesteps.to(original_samples.device ) A = timesteps.to(original_samples.device ) A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps] A = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): A = sigma.unsqueeze(-1 ) A = original_samples + noise * sigma return noisy_samples def __len__( self : Dict ) -> int: return self.config.num_train_timesteps
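# Stand-alone sketch of the Karras sigma schedule computed by
# _convert_to_karras above (rho = 7.0, the value used in Karras et al., 2022).
# This is a numpy-only restatement of the same interpolation in
# sigma**(1/rho) space, without any scheduler state; names are local to this
# sketch.
import numpy as np


def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # interpolate linearly between the endpoints in 1/rho space, then undo the power
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho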
"""simple docstring""" import logging import os from .state import PartialState class lowerCAmelCase_ ( logging.LoggerAdapter ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Optional[int] ) -> str: A = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ,A_ : Optional[int] ,*A_ : int ,**A_ : str ) -> Union[str, Any]: if PartialState._shared_state == {}: raise RuntimeError( 'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' ) A = kwargs.pop('main_process_only' ,A_ ) A = kwargs.pop('in_order' ,A_ ) if self.isEnabledFor(A_ ): if self._should_log(A_ ): A , A = self.process(A_ ,A_ ) self.logger.log(A_ ,A_ ,*A_ ,**A_ ) elif in_order: A = PartialState() for i in range(state.num_processes ): if i == state.process_index: A , A = self.process(A_ ,A_ ) self.logger.log(A_ ,A_ ,*A_ ,**A_ ) state.wait_for_everyone() def _snake_case ( snake_case__ : str , snake_case__ : str = None ): if log_level is None: A = os.environ.get('ACCELERATE_LOG_LEVEL' , snake_case__ ) A = logging.getLogger(snake_case__ ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(snake_case__ , {} )
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : list[int] ) -> None: A = len(A_ ) A = [0] * len_array if len_array > 0: A = array[0] for i in range(1 ,A_ ): A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool: A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(A_ ) return False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = RobertaPreLayerNormConfig.from_pretrained( snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) ) A = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue A = tensor_value A = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) model.save_pretrained(snake_case__ ) # convert tokenizer A = AutoTokenizer.from_pretrained(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging _lowercase = ( '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py''' ) _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( ): A = 'https://pypi.org/pypi/diffusers/json' A = json.loads(request.urlopen(snake_case__ ).read() )['releases'].keys() return sorted(snake_case__ , key=lambda snake_case__ : version.Version(snake_case__ ) ) def _snake_case ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(snake_case__ ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) A = Path(snake_case__ ) / '__init__.py' if not init_path.exists(): init_path.touch() def _snake_case ( snake_case__ : Union[str, os.PathLike] ): init_hf_modules() A = Path(snake_case__ ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) A = dynamic_module_path / '__init__.py' if not init_path.exists(): init_path.touch() def _snake_case ( snake_case__ : Union[str, Any] ): with open(snake_case__ , 'r' , encoding='utf-8' ) as f: A = f.read() # Imports of the form `import .xxx` A = re.findall('^\s*import\s+\.(\S+)\s*$' , snake_case__ , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , snake_case__ , flags=re.MULTILINE ) # Unique-ify return list(set(snake_case__ ) ) def _snake_case ( snake_case__ : str ): A = False A = [module_file] A = [] # Let's recurse through all relative imports while not no_change: A = [] for f in files_to_check: new_imports.extend(get_relative_imports(snake_case__ ) ) A = Path(snake_case__ ).parent A = [str(module_path / m ) for m in new_imports] A = [f for f in new_import_files if f not in all_relative_imports] A = [F'{f}.py' for f in new_import_files] A = len(snake_case__ ) == 0 all_relative_imports.extend(snake_case__ ) return all_relative_imports def _snake_case ( snake_case__ : int ): with open(snake_case__ , 'r' , encoding='utf-8' ) as f: A = f.read() # Imports of the form `import xxx` A = re.findall('^\s*import\s+(\S+)\s*$' , snake_case__ , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('^\s*from\s+(\S+)\s+import' , snake_case__ , flags=re.MULTILINE ) # Only keep the top-level module A = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )] # Unique-ify and test we got them all A = list(set(snake_case__ ) ) A = [] for imp in imports: try: importlib.import_module(snake_case__ ) except ImportError: missing_packages.append(snake_case__ ) if len(snake_case__ ) > 0: raise ImportError( 'This modeling file requires the following packages that were not found in your environment: ' F'{", ".join(snake_case__ )}. Run `pip install {" ".join(snake_case__ )}`' ) return get_relative_imports(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[Any] ): A = module_path.replace(os.path.sep , '.' 
) A = importlib.import_module(snake_case__ ) if class_name is None: return find_pipeline_class(snake_case__ ) return getattr(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : List[str] ): from ..pipelines import DiffusionPipeline A = dict(inspect.getmembers(snake_case__ , inspect.isclass ) ) A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , snake_case__ ) and cls.__module__.split('.' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' F' {loaded_module}.' ) A = cls return pipeline_class def _snake_case ( snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , ): A = str(snake_case__ ) A = os.path.join(snake_case__ , snake_case__ ) if os.path.isfile(snake_case__ ): A = module_file_or_url A = 'local' elif pretrained_model_name_or_path.count('/' ) == 0: A = get_diffusers_versions() # cut ".dev0" A = 'v' + '.'.join(__version__.split('.' )[:3] ) # retrieve github version that matches if revision is None: A = latest_version if latest_version[1:] in available_versions else 'main' logger.info(F'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: A = F'v{revision}' elif revision == "main": A = revision else: raise ValueError( F'`custom_revision`: {revision} does not exist. Please make sure to choose one of' F' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub A = COMMUNITY_PIPELINES_URL.format(revision=snake_case__ , pipeline=snake_case__ ) try: A = cached_download( snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , ) A = 'git' A = pretrained_model_name_or_path + '.py' except EnvironmentError: logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise else: try: # Load from URL or cache if already cached A = hf_hub_download( snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , ) A = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) ) except EnvironmentError: logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment A = check_imports(snake_case__ ) # Now we move the module inside our cached dynamic modules. A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(snake_case__ ) A = Path(snake_case__ ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. 
shutil.copy(snake_case__ , submodule_path / module_file ) for module_needed in modules_needed: A = F'{module_needed}.py' shutil.copy(os.path.join(snake_case__ , snake_case__ ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(snake_case__ , snake_case__ ): A = use_auth_token elif use_auth_token is True: A = HfFolder.get_token() else: A = None A = model_info(snake_case__ , revision=snake_case__ , token=snake_case__ ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. A = submodule_path / commit_hash A = full_submodule + os.path.sep + commit_hash create_dynamic_module(snake_case__ ) if not (submodule_path / module_file).exists(): shutil.copy(snake_case__ , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( snake_case__ , F'{module_needed}.py' , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , ) return os.path.join(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[str] = None , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , **snake_case__ : List[Any] , ): A = get_cached_module_file( snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , ) return get_class_in_module(snake_case__ , final_module.replace('.py' , '' ) )
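# Minimal sketch of the relative-import scan that get_relative_imports
# performs above: two MULTILINE regexes over the module source, then a
# unique-ified list. Names are local to this sketch.
import re


def find_relative_imports(source: str) -> list[str]:
    # matches `import .xxx` and `from .xxx import yyy`
    found = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
    found += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
    return sorted(set(found))


assert find_relative_imports("import os\nfrom .utils import helper\n") == ["utils"]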
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''roformer''' def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size if embedding_size is None else embedding_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = rotary_value A = use_cache class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
"""simple docstring""" import datasets _lowercase = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' _lowercase = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' _lowercase = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def _snake_case ( snake_case__ : Dict , snake_case__ : List[str] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ), } ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Union[str, Any] ) -> Optional[Any]: return {"accuracy": simple_accuracy(A_ ,A_ )}
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : Dict ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowercase = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = PegasusTokenizer _lowerCamelCase: Tuple = PegasusTokenizerFast _lowerCamelCase: List[Any] = True _lowerCamelCase: List[Any] = True def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: super().setUp() # We have a SentencePiece fixture for testing A = PegasusTokenizer(A_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : int ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict ) -> str: return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = '</s>' A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<pad>' ) self.assertEqual(vocab_keys[1] ,'</s>' ) self.assertEqual(vocab_keys[-1] ,'v' ) self.assertEqual(len(A_ ) ,1103 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size ,1103 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A = self.tokenizer_class.from_pretrained(self.tmpdirname ) A = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) A = rust_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] A = py_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word A = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' A = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] A = tokenizer([raw_input_str] ,return_tensors=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 A = 'To ensure a smooth flow of bank resolutions.' 
A = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] A = tokenizer([raw_input_str] ,return_tensors=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = ['This is going to be way too long.' * 150, 'short example'] A = ['not super long but more than 5 tokens', 'tiny'] A = self._large_tokenizer(A_ ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) A = self._large_tokenizer( text_target=A_ ,max_length=5 ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(A_ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: # fmt: off A = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,) @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = PegasusTokenizer _lowerCamelCase: Dict = PegasusTokenizerFast _lowerCamelCase: Tuple = True _lowerCamelCase: Any = True def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: super().setUp() # We have a SentencePiece fixture for testing A = PegasusTokenizer(A_ ,offset=0 ,mask_token_sent=A_ ,mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def 
_SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : List[Any] ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ) -> int: return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A = self.tokenizer_class.from_pretrained(self.tmpdirname ) A = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) A = rust_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] A = py_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: A = ['This is going to be way too long.' * 1000, 'short example'] A = ['not super long but more than 5 tokens', 'tiny'] A = self._large_tokenizer(A_ ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) A = self._large_tokenizer( text_target=A_ ,max_length=5 ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(A_ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) A = self._large_tokenizer(A_ ).input_ids self.assertListEqual( A_ ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,)
"""simple docstring""" import argparse import struct import unittest class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : bytes ) -> None: A = data # Initialize hash values A = [ 0X6_A_0_9_E_6_6_7, 0XB_B_6_7_A_E_8_5, 0X3_C_6_E_F_3_7_2, 0XA_5_4_F_F_5_3_A, 0X5_1_0_E_5_2_7_F, 0X9_B_0_5_6_8_8_C, 0X1_F_8_3_D_9_A_B, 0X5_B_E_0_C_D_1_9, ] # Initialize round constants A = [ 0X4_2_8_A_2_F_9_8, 0X7_1_3_7_4_4_9_1, 0XB_5_C_0_F_B_C_F, 0XE_9_B_5_D_B_A_5, 0X3_9_5_6_C_2_5_B, 0X5_9_F_1_1_1_F_1, 0X9_2_3_F_8_2_A_4, 0XA_B_1_C_5_E_D_5, 0XD_8_0_7_A_A_9_8, 0X1_2_8_3_5_B_0_1, 0X2_4_3_1_8_5_B_E, 0X5_5_0_C_7_D_C_3, 0X7_2_B_E_5_D_7_4, 0X8_0_D_E_B_1_F_E, 0X9_B_D_C_0_6_A_7, 0XC_1_9_B_F_1_7_4, 0XE_4_9_B_6_9_C_1, 0XE_F_B_E_4_7_8_6, 0X0_F_C_1_9_D_C_6, 0X2_4_0_C_A_1_C_C, 0X2_D_E_9_2_C_6_F, 0X4_A_7_4_8_4_A_A, 0X5_C_B_0_A_9_D_C, 0X7_6_F_9_8_8_D_A, 0X9_8_3_E_5_1_5_2, 0XA_8_3_1_C_6_6_D, 0XB_0_0_3_2_7_C_8, 0XB_F_5_9_7_F_C_7, 0XC_6_E_0_0_B_F_3, 0XD_5_A_7_9_1_4_7, 0X0_6_C_A_6_3_5_1, 0X1_4_2_9_2_9_6_7, 0X2_7_B_7_0_A_8_5, 0X2_E_1_B_2_1_3_8, 0X4_D_2_C_6_D_F_C, 0X5_3_3_8_0_D_1_3, 0X6_5_0_A_7_3_5_4, 0X7_6_6_A_0_A_B_B, 0X8_1_C_2_C_9_2_E, 0X9_2_7_2_2_C_8_5, 0XA_2_B_F_E_8_A_1, 0XA_8_1_A_6_6_4_B, 0XC_2_4_B_8_B_7_0, 0XC_7_6_C_5_1_A_3, 0XD_1_9_2_E_8_1_9, 0XD_6_9_9_0_6_2_4, 0XF_4_0_E_3_5_8_5, 0X1_0_6_A_A_0_7_0, 0X1_9_A_4_C_1_1_6, 0X1_E_3_7_6_C_0_8, 0X2_7_4_8_7_7_4_C, 0X3_4_B_0_B_C_B_5, 0X3_9_1_C_0_C_B_3, 0X4_E_D_8_A_A_4_A, 0X5_B_9_C_C_A_4_F, 0X6_8_2_E_6_F_F_3, 0X7_4_8_F_8_2_E_E, 0X7_8_A_5_6_3_6_F, 0X8_4_C_8_7_8_1_4, 0X8_C_C_7_0_2_0_8, 0X9_0_B_E_F_F_F_A, 0XA_4_5_0_6_C_E_B, 0XB_E_F_9_A_3_F_7, 0XC_6_7_1_7_8_F_2, ] A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes: A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64)) A = struct.pack('>Q' ,(len(A_ ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None: # Convert into blocks of 64 bytes A = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A = list(struct.unpack('>16L' ,A_ ) ) # add 48 0-ed integers words += [0] * 48 A , A , A , A , A , A , A , A = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array A = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) A = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X1_0_0_0_0_0_0_0_0 # Compression A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 ) A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g) A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X1_0_0_0_0_0_0_0_0 A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 ) A = (a & b) ^ (a & c) ^ (b & c) A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0 A , A , A , A , A , A , A , A = ( g, f, e, ((d + tempa) % 0X1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0), ) A = [a, b, c, d, e, f, g, h] # Modify final values A = [ ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int: return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class lowerCAmelCase_ ( unittest.TestCase ): '''simple 
docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None: import hashlib A = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() ) def _snake_case ( ): import doctest doctest.testmod() A = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) A = parser.parse_args() A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: A = f.read() else: A = bytes(snake_case__ , 'utf-8' ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
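# Stand-alone restatement of the 32-bit rotate-right used throughout the
# compression loop above; the mask keeps the rotated value inside one machine
# word despite Python's unbounded integers.
def ror32(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))


assert ror32(1, 1) == 0x80000000
# The unit test above performs the canonical cross-check against the standard
# library:  SHAaaa(data).hash == hashlib.sha256(data).hexdigest()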
"""simple docstring""" from math import pow, sqrt def _snake_case ( *snake_case__ : float ): A = len(snake_case__ ) > 0 and all(value > 0.0 for value in values ) return result def _snake_case ( snake_case__ : float , snake_case__ : float ): return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(snake_case__ , snake_case__ ) else ValueError('Input Error: Molar mass values must greater than 0.' ) ) def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(snake_case__ , snake_case__ , snake_case__ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(snake_case__ , snake_case__ , snake_case__ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(snake_case__ , snake_case__ , snake_case__ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(snake_case__ , snake_case__ , snake_case__ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''DeiTFeatureExtractor'''] _lowercase = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : Optional[int] ): A = SwinConfig( embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , ) A = DetaConfig( backbone_config=snake_case__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=snake_case__ , with_box_refine=snake_case__ , two_stage=snake_case__ , ) # set labels A = 'huggingface/label-files' if "o365" in model_name: A = 366 A = 'object365-id2label.json' else: A = 91 A = 'coco-detection-id2label.json' A = num_labels A = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} return config def _snake_case ( snake_case__ : Optional[Any] ): A = [] # stem # fmt: off rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') ) rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) 
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') ) rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') ) rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') ) rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') ) rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') ) rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', 
F'model.encoder.layers.{i}.final_layer_norm.bias') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') ) # fmt: on return rename_keys def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Dict ): A = dct.pop(snake_case__ ) A = val def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ): A = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' ) A = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) 
to the state dict A = in_proj_weight[:dim, :] A = in_proj_bias[: dim] A = in_proj_weight[ dim : dim * 2, : ] A = in_proj_bias[ dim : dim * 2 ] A = in_proj_weight[ -dim :, : ] A = in_proj_bias[-dim :] # fmt: on def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ): # transformer decoder self-attention layers A = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention A = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A = in_proj_weight[:hidden_size, :] A = in_proj_bias[:hidden_size] A = in_proj_weight[ hidden_size : hidden_size * 2, : ] A = in_proj_bias[hidden_size : hidden_size * 2] A = in_proj_weight[-hidden_size:, :] A = in_proj_bias[-hidden_size:] def _snake_case ( ): A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def _snake_case ( snake_case__ : List[str] , snake_case__ : Any , snake_case__ : List[str] ): A = get_deta_config(snake_case__ ) # load original state dict if model_name == "deta-swin-large": A = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' ) elif model_name == "deta-swin-large-o365": A = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' ) else: raise ValueError(F'Model name {model_name} not supported' ) A = torch.load(snake_case__ , map_location='cpu' )['model'] # original state dict for name, param in state_dict.items(): print(snake_case__ , param.shape ) # rename keys A = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) read_in_swin_q_k_v(snake_case__ , config.backbone_config ) read_in_decoder_q_k_v(snake_case__ , snake_case__ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: A = state_dict.pop(snake_case__ ) A = val if "input_proj" in key: A = state_dict.pop(snake_case__ ) A = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: A = state_dict.pop(snake_case__ ) A = val # finally, create HuggingFace model and load state dict A = DetaForObjectDetection(snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() A = 'cuda' if torch.cuda.is_available() else 'cpu' model.to(snake_case__ ) # load image processor A = DetaImageProcessor(format='coco_detection' ) # verify our conversion on image A = prepare_img() A = processor(images=snake_case__ , return_tensors='pt' ) A = encoding['pixel_values'] A = model(pixel_values.to(snake_case__ ) ) # verify logits print('Logits:' , outputs.logits[0, :3, :3] ) print('Boxes:' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": A = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) A = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": A = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) A = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case__ ) , 
atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case__ ) , atol=1e-4 ) print('Everything ok!' ) if pytorch_dump_folder_path: # Save model and processor logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) # Push to hub if push_to_hub: print('Pushing model and processor to hub...' ) model.push_to_hub(F'jozhang97/{model_name}' ) processor.push_to_hub(F'jozhang97/{model_name}' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--model_name''', type=str, default='''deta-swin-large''', choices=['''deta-swin-large''', '''deta-swin-large-o365'''], help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _lowercase = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
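For clarity, here is a minimal, self-contained sketch of the fused-projection split that read_in_swin_q_k_v and read_in_decoder_q_k_v perform above: a single in_proj matrix of shape (3*dim, dim) is sliced into consecutive query, key and value thirds. The dim value below is a hypothetical stage size, not read from any checkpoint.

import torch

dim = 96  # hypothetical per-stage hidden size
in_proj_weight = torch.randn(3 * dim, dim)
in_proj_bias = torch.randn(3 * dim)

# query, key and value occupy consecutive thirds of the fused projection
q_w, k_w, v_w = in_proj_weight[:dim], in_proj_weight[dim : dim * 2], in_proj_weight[-dim:]
q_b, k_b, v_b = in_proj_bias[:dim], in_proj_bias[dim : dim * 2], in_proj_bias[-dim:]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)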
74
"""simple docstring""" from __future__ import annotations import requests def _snake_case ( snake_case__ : str ): A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(snake_case__ ).json() def _snake_case ( snake_case__ : int = 10 ): A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' A = requests.get(snake_case__ ).json()[:max_stories] return [get_hackernews_story(snake_case__ ) for story_id in story_ids] def _snake_case ( snake_case__ : int = 10 ): A = hackernews_top_stories(snake_case__ ) return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
74
1
"""simple docstring""" def _snake_case ( snake_case__ : int , snake_case__ : int ): while second != 0: A = first & second first ^= second A = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() _lowercase = int(input('''Enter the first number: ''').strip()) _lowercase = int(input('''Enter the second number: ''').strip()) print(F"""{add(first, second) = }""")
74
"""simple docstring""" from string import ascii_uppercase _lowercase = {char: i for i, char in enumerate(ascii_uppercase)} _lowercase = dict(enumerate(ascii_uppercase)) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = len(snake_case__ ) A = 0 while True: if x == i: A = 0 if len(snake_case__ ) == len(snake_case__ ): break key += key[i] i += 1 return key def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in message: if letter == " ": cipher_text += " " else: A = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: A = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _snake_case ( ): A = 'THE GERMAN ATTACK' A = 'SECRET' A = generate_key(snake_case__ , snake_case__ ) A = cipher_text(snake_case__ , snake_case__ ) print(F'Encrypted Text = {s}' ) print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
74
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json''' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''speech_to_text''' _lowerCamelCase: Union[str, Any] = ['''past_key_values'''] _lowerCamelCase: Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Union[str, Any] ,A_ : Tuple=1_0000 ,A_ : Union[str, Any]=12 ,A_ : int=2048 ,A_ : List[Any]=4 ,A_ : int=6 ,A_ : List[Any]=2048 ,A_ : List[str]=4 ,A_ : Optional[Any]=0.0 ,A_ : str=0.0 ,A_ : Dict=True ,A_ : List[Any]=True ,A_ : int="relu" ,A_ : List[str]=256 ,A_ : Dict=0.1 ,A_ : List[Any]=0.0 ,A_ : List[Any]=0.0 ,A_ : Union[str, Any]=0.02 ,A_ : str=2 ,A_ : Union[str, Any]=True ,A_ : List[str]=1 ,A_ : Union[str, Any]=0 ,A_ : int=2 ,A_ : int=6000 ,A_ : Dict=1024 ,A_ : str=2 ,A_ : Tuple=(5, 5) ,A_ : List[str]=1024 ,A_ : str=80 ,A_ : int=1 ,**A_ : Union[str, Any] ,) -> Optional[Any]: A = vocab_size A = d_model A = encoder_ffn_dim A = encoder_layers A = encoder_attention_heads A = decoder_ffn_dim A = decoder_layers A = decoder_attention_heads A = dropout A = attention_dropout A = activation_dropout A = activation_function A = init_std A = encoder_layerdrop A = decoder_layerdrop A = use_cache A = encoder_layers A = scale_embedding # scale factor will be sqrt(d_model) if True A = max_source_positions A = max_target_positions A = num_conv_layers A = list(A_ ) A = conv_channels A = input_feat_per_channel A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ' F'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ' F'`config.num_conv_layers = {self.num_conv_layers}`.' ) super().__init__( pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,is_encoder_decoder=A_ ,decoder_start_token_id=A_ ,**A_ ,)
74
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): A = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = 'sgugger/tiny-distilbert-classification' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) # set architectures equal to `None` A = None A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ 
,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) benchmark.run() self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: A = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(A_ : Optional[int] ): self.assertTrue(hasattr(A_ ,'sequential' ) ) self.assertTrue(hasattr(A_ ,'cumulative' ) ) self.assertTrue(hasattr(A_ ,'current' ) ) self.assertTrue(hasattr(A_ ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ 
,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() )
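A minimal sketch of running the same benchmark outside the test harness, using the utilities imported at the top of the test file (tiny model and shapes, so it finishes quickly):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()  # populates time/memory inference results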
74
1
"""simple docstring""" from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata _lowercase = '''''' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class lowerCAmelCase_ ( tr.AbstractTransform ): '''simple docstring''' def __init__( self : str ,A_ : str = " " ) -> Tuple: A = sentence_delimiter def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ) -> Optional[int]: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[str] ) -> Any: A = [] for sent_idx, sentence in enumerate(A_ ): chars.extend(self.process_string(A_ ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(A_ ) - 1: chars.append(self.sentence_delimiter ) return chars _lowercase = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: _lowercase = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) _lowercase = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' _lowercase = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' _lowercase = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('string' ,id='sequence' ), 'references': datasets.Value('string' ,id='sequence' ), } ) ,codebase_urls=['https://github.com/jitsi/jiwer/'] ,reference_urls=[ 'https://en.wikipedia.org/wiki/Word_error_rate', 'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates', ] ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : str=False ) -> Tuple: if concatenate_texts: return jiwer.compute_measures( A_ ,A_ ,truth_transform=A_ ,hypothesis_transform=A_ ,)["wer"] A = 0 A = 0 for prediction, reference in zip(A_ ,A_ ): A = jiwer.compute_measures( A_ ,A_ ,truth_transform=A_ ,hypothesis_transform=A_ ,) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
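For reference, a dependency-free sketch of what the metric computes: character-level Levenshtein distance divided by reference length (jiwer's substitutions + deletions + insertions over N reduce to the same ratio; the reference must be non-empty):

def char_error_rate(reference: str, hypothesis: str) -> float:
    # rolling-array edit distance over characters
    m, n = len(reference), len(hypothesis)
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev, dp[0] = dp[0], i
        for j in range(1, n + 1):
            cur = dp[j]
            dp[j] = min(dp[j] + 1, dp[j - 1] + 1, prev + (reference[i - 1] != hypothesis[j - 1]))
            prev = cur
    return dp[n] / m


assert abs(char_error_rate("abc", "abd") - 1 / 3) < 1e-9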
74
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: A , A , A = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[int] ) -> Dict: return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return self.major, self.minor, self.patch def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]: if isinstance(A_ ,A_ ): return Version(A_ ) elif isinstance(A_ ,A_ ): return other raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' ) def __eq__( self : List[Any] ,A_ : Dict ) -> Any: try: A = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple: A = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : Union[str, Any] ) -> Union[str, Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]: A = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.version_str def _snake_case ( snake_case__ : List[str] ): A = _VERSION_REG.match(snake_case__ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def _snake_case ( snake_case__ : str ): return ".".join(str(snake_case__ ) for v in version_tuple )
74
1
"""simple docstring""" from __future__ import annotations from cmath import sqrt def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : int ): if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) A = b * b - 4 * a * c A = (-b + sqrt(snake_case__ )) / (2 * a) A = (-b - sqrt(snake_case__ )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def _snake_case ( ): A , A = quadratic_roots(a=5 , b=6 , c=1 ) print(F'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
74
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml _lowercase = NewType('''DataClass''', Any) _lowercase = NewType('''DataClassType''', Any) def _snake_case ( snake_case__ : Tuple ): if isinstance(snake_case__ , snake_case__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def _snake_case ( snake_case__ : list ): A = {str(snake_case__ ): choice for choice in choices} return lambda snake_case__ : str_to_choice.get(snake_case__ , snake_case__ ) def _snake_case ( *, snake_case__ : Union[str, List[str]] = None , snake_case__ : str = None , snake_case__ : Any = dataclasses.MISSING , snake_case__ : Callable[[], Any] = dataclasses.MISSING , snake_case__ : dict = None , **snake_case__ : Any , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls A = {} if aliases is not None: A = aliases if help is not None: A = help return dataclasses.field(metadata=snake_case__ , default=snake_case__ , default_factory=snake_case__ , **snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Iterable[DataClassType] def __init__( self : List[str] ,A_ : Union[DataClassType, Iterable[DataClassType]] ,**A_ : Any ) -> Optional[int]: # To make the default appear when using --help if "formatter_class" not in kwargs: A = ArgumentDefaultsHelpFormatter super().__init__(**A_ ) if dataclasses.is_dataclass(A_ ): A = [dataclass_types] A = list(A_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ,A_ : dataclasses.Field ) -> Optional[Any]: A = F'--{field.name}' A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A_ ): raise RuntimeError( 'Unresolved type detected, which should have been done with the help of ' '`typing.get_type_hints` method by default' ) A = kwargs.pop('aliases' ,[] ) if isinstance(A_ ,A_ ): A = [aliases] A = getattr(field.type ,'__origin__' ,field.type ) if origin_type is Union or (hasattr(A_ ,'UnionType' ) and isinstance(A_ ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A_ ) not in field.type.__args__ ): raise ValueError( 'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because' ' the argument parser only supports one type per argument.' F' Problem encountered in field \'{field.name}\'.' 
) if type(A_ ) not in field.type.__args__: # filter `str` in Union A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] A = getattr(field.type ,'__origin__' ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) A = ( field.type.__args__[0] if isinstance(A_ ,field.type.__args__[1] ) else field.type.__args__[1] ) A = getattr(field.type ,'__origin__' ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) A = {} if origin_type is Literal or (isinstance(field.type ,A_ ) and issubclass(field.type ,A_ )): if origin_type is Literal: A = field.type.__args__ else: A = [x.value for x in field.type] A = make_choice_type_function(kwargs['choices'] ) if field.default is not dataclasses.MISSING: A = field.default else: A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument A = copy(A_ ) # Hack because type=bool in argparse does not behave as we want. A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way A = default # This tells argparse we accept 0 or 1 value after --field_name A = '?' # This is the value that will get picked if we do --field_name (without value) A = True elif isclass(A_ ) and issubclass(A_ ,A_ ): A = field.type.__args__[0] A = '+' if field.default_factory is not dataclasses.MISSING: A = field.default_factory() elif field.default is dataclasses.MISSING: A = True else: A = field.type if field.default is not dataclasses.MISSING: A = field.default elif field.default_factory is not dataclasses.MISSING: A = field.default_factory() else: A = True parser.add_argument(A_ ,*A_ ,**A_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): A = False parser.add_argument(F'--no_{field.name}' ,action='store_false' ,dest=field.name ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DataClassType ) -> List[Any]: if hasattr(A_ ,'_argument_group_name' ): A = self.add_argument_group(dtype._argument_group_name ) else: A = self try: A = get_type_hints(A_ ) except NameError: raise RuntimeError( F'Type resolution failed for {dtype}. Try declaring the class in global scope or ' 'removing line of `from __future__ import annotations` which opts in Postponed ' 'Evaluation of Annotations (PEP 563)' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A_ ): A = '.'.join(map(A_ ,sys.version_info[:3] ) ) raise RuntimeError( F'Type resolution failed for {dtype} on Python {python_version}. 
Try removing ' 'line of `from __future__ import annotations` which opts in union types as ' '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ' 'support Python versions that lower than 3.10, you need to use ' '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ' '`X | None`.' ) from ex raise for field in dataclasses.fields(A_ ): if not field.init: continue A = type_hints[field.name] self._parse_dataclass_field(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any=None ,A_ : int=False ,A_ : Any=True ,A_ : List[str]=None ,A_ : Union[str, Any]=None ,) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): A = [] if args_filename: args_files.append(Path(A_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values A = ArgumentParser() args_file_parser.add_argument(A_ ,type=A_ ,action='append' ) # Use only remaining args for further parsing (remove the args_file_flag) A , A = args_file_parser.parse_known_args(args=A_ ) A = vars(A_ ).get(args_file_flag.lstrip('-' ) ,A_ ) if cmd_args_file_paths: args_files.extend([Path(A_ ) for p in cmd_args_file_paths] ) A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last A = file_args + args if args is not None else file_args + sys.argv[1:] A , A = self.parse_known_args(args=A_ ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in vars(A_ ).items() if k in keys} for k in keys: delattr(A_ ,A_ ) A = dtype(**A_ ) outputs.append(A_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' ) return (*outputs,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict[str, Any] ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = set(args.keys() ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) A = dtype(**A_ ) outputs.append(A_ ) if not allow_extra_keys and unused_keys: raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(A_ )}' ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: with open(Path(A_ ) ,encoding='utf-8' ) as open_json_file: A = json.loads(open_json_file.read() ) A = self.parse_dict(A_ ,allow_extra_keys=A_ ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = self.parse_dict(yaml.safe_load(Path(A_ ).read_text() ) ,allow_extra_keys=A_ ) return tuple(A_ )
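A minimal sketch of driving the parser above, assuming the transformers HfArgumentParser API (parse_args_into_dataclasses is the library name of the five-argument method defined above; passing --do_eval with no value sets it to True via the nargs='?' handling):

import dataclasses

from transformers import HfArgumentParser


@dataclasses.dataclass
class TrainingOpts:
    learning_rate: float = 3e-5
    do_eval: bool = False


parser = HfArgumentParser(TrainingOpts)
(opts,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_eval"])
assert opts.learning_rate == 1e-4 and opts.do_eval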
74
1
"""simple docstring""" from __future__ import annotations _lowercase = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ): A = [ [0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) ) ] # the reference grid A = 1 A = [ [0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) ) ] # the action grid A = init[0] A = init[1] A = 0 A = g + heuristic[x][y] # cost from starting cell to destination cell A = [[f, g, x, y]] A = False # flag that is set when search is complete A = False # flag set if we can't find expand while not found and not resign: if len(snake_case__ ) == 0: raise ValueError('Algorithm is unable to find solution' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() A = cell.pop() A = next_cell[2] A = next_cell[3] A = next_cell[1] if x == goal[0] and y == goal[1]: A = True else: for i in range(len(snake_case__ ) ): # to try out different valid actions A = x + DIRECTIONS[i][0] A = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: A = g + cost A = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) A = 1 A = i A = [] A = goal[0] A = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: A = x - DIRECTIONS[action[x][y]][0] A = y - DIRECTIONS[action[x][y]][1] A = xa A = ya invpath.append([x, y] ) A = [] for i in range(len(snake_case__ ) ): path.append(invpath[len(snake_case__ ) - 1 - i] ) return path, action if __name__ == "__main__": _lowercase = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] _lowercase = [0, 0] # all coordinates are given in format [y,x] _lowercase = [len(grid) - 1, len(grid[0]) - 1] _lowercase = 1 # the cost map which pushes the path closer to the goal _lowercase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): _lowercase = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map _lowercase = 99 _lowercase , _lowercase = search(grid, init, goal, cost, heuristic) print('''ACTION MAP''') for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
74
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase = 16 _lowercase = 32 def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ): A = AutoTokenizer.from_pretrained(snake_case__ ) A = load_dataset('glue' , 'mrpc' ) def tokenize_function(snake_case__ : Dict ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. A = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[int] ): # Initialize accelerator A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config['lr'] A = int(config['num_epochs'] ) A = int(config['seed'] ) A = int(config['batch_size'] ) A = args.model_name_or_path set_seed(snake_case__ ) A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer A = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A = optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: A = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: A = 1 A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in 
the same order we gave them to the # prepare method. A , A , A , A , A = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over A = 0 # We also need to keep track of the stating epoch so files are named properly A = 0 # Now we train the model A = evaluate.load('glue' , 'mrpc' ) A = 0 A = {} for epoch in range(snake_case__ , snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): A = model(**snake_case__ ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A = 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A , A = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case__ ) - 1: A = predictions[: len(eval_dataloader.dataset ) - samples_seen] A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , snake_case__ ) A = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: A = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(snake_case__ , snake_case__ ) def _snake_case ( ): A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , ) parser.add_argument( '--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , ) A = parser.parse_args() A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
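A hypothetical invocation of the script above (flag names as defined in main(); the accelerate/DeepSpeed config file name is an assumption):

# accelerate launch --config_file deepspeed_config.yaml this_script.py \
#     --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .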
74
1
"""simple docstring""" def _snake_case ( snake_case__ : int = 10 , snake_case__ : int = 22 ): A = range(1 , snake_case__ ) A = range(1 , snake_case__ ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(10, 22) = }""")
74
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : int ,A_ : Dict ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : 
Union[str, Any] ,A_ : int ,A_ : str ,) -> Any: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ,A_ : str ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : str ,A_ : Any ,A_ : str ,A_ : Dict ,) -> Dict: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Any ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,) -> int: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Optional[int] ,A_ : Any ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ,) -> List[Any]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ,A_ : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> Optional[int]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int] ,) -> List[str]: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : List[Any] ,) -> List[str]: A = 
self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[int] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ) -> Any: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[int] ,A_ : List[Any]=False ) -> int: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Any ,A_ : str ,A_ : Tuple ,A_ : Any ,A_ : Any=False ,A_ : Any=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ,A_ : int ,A_ : Any ,A_ : str=False ,A_ : Any=1 ) -> Tuple: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
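A standalone sketch of the greedy generation exercised in the slow test above (model id and input ids taken from the test; running it downloads the checkpoint):

import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
output_ids = model.generate(input_ids, do_sample=False)   # greedy decoding, as in the test
print(output_ids[0].tolist())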
74
1
"""simple docstring""" _lowercase = { '''meter''': '''m''', '''kilometer''': '''km''', '''megametre''': '''Mm''', '''gigametre''': '''Gm''', '''terametre''': '''Tm''', '''petametre''': '''Pm''', '''exametre''': '''Em''', '''zettametre''': '''Zm''', '''yottametre''': '''Ym''', } # Exponent of the factor(meter) _lowercase = { '''m''': 0, '''km''': 3, '''Mm''': 6, '''Gm''': 9, '''Tm''': 12, '''Pm''': 15, '''Em''': 18, '''Zm''': 21, '''Ym''': 24, } def _snake_case ( snake_case__ : float , snake_case__ : str , snake_case__ : str ): A = from_type.lower().strip('s' ) A = to_type.lower().strip('s' ) A = UNIT_SYMBOL.get(snake_case__ , snake_case__ ) A = UNIT_SYMBOL.get(snake_case__ , snake_case__ ) if from_sanitized not in METRIC_CONVERSION: A = ( F'Invalid \'from_type\' value: {from_type!r}.\n' F'Conversion abbreviations are: {", ".join(snake_case__ )}' ) raise ValueError(snake_case__ ) if to_sanitized not in METRIC_CONVERSION: A = ( F'Invalid \'to_type\' value: {to_type!r}.\n' F'Conversion abbreviations are: {", ".join(snake_case__ )}' ) raise ValueError(snake_case__ ) A = METRIC_CONVERSION[from_sanitized] A = METRIC_CONVERSION[to_sanitized] A = 1 if from_exponent > to_exponent: A = from_exponent - to_exponent else: A = -(to_exponent - from_exponent) return value * pow(10 , snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
74
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowercase = logging.get_logger(__name__) @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : int ,**A_ : Any ) -> Any: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A = deprecated_arg[3:] A = not kwargs.pop(A_ ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) A = kwargs.pop('tpu_name' ,self.tpu_name ) A = kwargs.pop('device_idx' ,self.device_idx ) A = kwargs.pop('eager_mode' ,self.eager_mode ) A = kwargs.pop('use_xla' ,self.use_xla ) super().__init__(**A_ ) _lowerCamelCase: str = field( default=_lowercase , metadata={'''help''': '''Name of TPU'''} , ) _lowerCamelCase: int = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Benchmark models in eager model.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self ,['tf'] ) A = None if self.tpu: try: if self.tpu_name: A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: A = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: A = None return tpu @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self ,['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) A = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' ) A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] ,'GPU' ) # disable GPU A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' ) return strategy @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: requires_backends(self ,['tf'] ) return self._setup_tpu is not None @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> "tf.distribute.Strategy": requires_backends(self ,['tf'] ) return self._setup_strategy @property def _SCREAMING_SNAKE_CASE ( self : int ) -> str: requires_backends(self ,['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: requires_backends(self ,['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _SCREAMING_SNAKE_CASE ( self : str ) -> bool: return self.n_gpu > 0
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy _lowercase = logging.getLogger(__name__) _lowercase = '''pytorch_model.bin''' @dataclasses.dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = dataclasses.field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , ) @dataclasses.dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} ) _lowerCamelCase: str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default=_lowercase , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default=_lowercase , metadata={'''help''': '''The name of the task to train on.'''} , ) _lowerCamelCase: Optional[List[str]] = dataclasses.field( default=_lowercase , metadata={'''help''': '''The list of labels for the task.'''} ) @dataclasses.dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = dataclasses.field( metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default='''no''' , metadata={ '''help''': '''The evaluation strategy to adopt during training. 
Possible values are: ["no", "step", "epoch"]''' } , ) _lowerCamelCase: Optional[int] = dataclasses.field( default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , ) _lowerCamelCase: Optional[float] = dataclasses.field( default=0.0 , metadata={ '''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.''' } , ) _lowerCamelCase: Optional[bool] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , ) _lowerCamelCase: Optional[bool] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , ) _lowerCamelCase: Optional[bool] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , ) _lowerCamelCase: Optional[float] = dataclasses.field( default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , ) _lowerCamelCase: Optional[int] = dataclasses.field( default=100 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , ) _lowerCamelCase: Optional[int] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Random seed for initialization.'''} , ) def _snake_case ( snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Dict ): A = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: A = dataset.filter(lambda snake_case__ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 A = int(eval_result * len(snake_case__ ) ) print(snake_case__ ) A = dataset.sort('probability' , reverse=snake_case__ ) A = dataset.select(range(snake_case__ ) ) A = dataset.remove_columns(['label', 'probability'] ) A = dataset.rename_column('prediction' , 'label' ) A = dataset.map(lambda snake_case__ : {"label": idalabel[example["label"]]} ) A = dataset.shuffle(seed=args.seed ) A = os.path.join(snake_case__ , F'train_pseudo.{args.data_file_extension}' ) if args.data_file_extension == "csv": dataset.to_csv(snake_case__ , index=snake_case__ ) else: dataset.to_json(snake_case__ ) def _snake_case ( snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : str , **snake_case__ : Optional[int] ): A = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() A = STModelArguments(model_name_or_path=snake_case__ ) A = STDataArguments(train_file=snake_case__ , infer_file=snake_case__ ) A = STTrainingArguments(output_dir=snake_case__ ) A = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(snake_case__ ).items(): setattr(snake_case__ , snake_case__ , snake_case__ ) for key, value in kwargs.items(): if hasattr(snake_case__ , snake_case__ ): setattr(snake_case__ , snake_case__ , snake_case__ ) # Sanity checks A = {} A = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None A = args.train_file A = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None A = args.eval_file for key in data_files: A = data_files[key].split('.' )[-1] assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.' if args.data_file_extension is None: A = extension else: assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.' assert ( args.eval_metric in datasets.list_metrics() ), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.' # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('Creating the initial data directory for self-training...' ) A = F'{args.output_dir}/self-train_iter-{{}}'.format A = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=snake_case__ ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) accelerator.wait_for_everyone() A = None A = None A = 0 A = False # Show the progress bar A = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): A = data_dir_format(snake_case__ ) assert os.path.exists(snake_case__ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 A = os.path.join(snake_case__ , 'stage-1' ) A = { 'accelerator': accelerator, 'model_name_or_path': args.model_name_or_path, 'cache_dir': args.cache_dir, 'do_train': True, 'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'], 'do_eval': True if args.eval_file is not None else False, 'eval_file': data_files['eval'], 'do_predict': True, 'infer_file': data_files['infer'], 'task_name': args.task_name, 'label_list': args.label_list, 'output_dir': current_output_dir, 'eval_metric': args.eval_metric, 'evaluation_strategy': args.evaluation_strategy, 'early_stopping_patience': args.early_stopping_patience, 'early_stopping_threshold': args.early_stopping_threshold, 'seed': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(snake_case__ , snake_case__ ): arguments_dict.update({key: value} ) A = os.path.join(snake_case__ , 'best-checkpoint' , snake_case__ ) if os.path.exists(snake_case__ ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' 
, snake_case__ , snake_case__ , ) else: logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , snake_case__ ) finetune(**snake_case__ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case__ ) logger.info('Self-training job completed: iteration: %d, stage: 1.' , snake_case__ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data A = os.path.join(snake_case__ , 'best-checkpoint' ) A = os.path.join(snake_case__ , 'stage-2' ) # Update arguments_dict A = model_path A = data_files['train'] A = current_output_dir A = os.path.join(snake_case__ , 'best-checkpoint' , snake_case__ ) if os.path.exists(snake_case__ ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , snake_case__ , snake_case__ , ) else: logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , snake_case__ ) finetune(**snake_case__ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case__ ) logger.info('Self-training job completed: iteration: %d, stage: 2.' , snake_case__ ) A = iteration A = data_dir_format(iteration + 1 ) A = AutoConfig.from_pretrained(os.path.join(snake_case__ , 'best-checkpoint' ) ) A = config.idalabel A = os.path.join(snake_case__ , 'eval_results_best-checkpoint.json' ) A = os.path.join(snake_case__ , 'test_results_best-checkpoint.json' ) assert os.path.exists(snake_case__ ) with open(snake_case__ , 'r' ) as f: A = float(json.load(snake_case__ )[args.eval_metric] ) A = os.path.join(snake_case__ , 'infer_output_best-checkpoint.csv' ) assert os.path.exists(snake_case__ ) # Loading the dataset from local csv or json files. A = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data'] A = load_dataset('csv' , data_files={'data': infer_output_file} )['data'] if accelerator.is_main_process: os.makedirs(snake_case__ , exist_ok=snake_case__ ) shutil.copy(snake_case__ , os.path.join(snake_case__ , F'eval_results_iter-{iteration}.json' ) ) if os.path.exists(snake_case__ ): shutil.copy(snake_case__ , os.path.join(snake_case__ , F'test_results_iter-{iteration}.json' ) ) create_pseudo_labeled_data(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) accelerator.wait_for_everyone() A = os.path.join(snake_case__ , F'train_pseudo.{args.data_file_extension}' ) if args.evaluation_strategy != IntervalStrategy.NO.value: A = eval_result if best_iteration is None: A = new_iteration A = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: A = new_iteration A = new_eval_result A = 0 else: if new_eval_result == best_eval_result: A = new_iteration A = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: A = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('Best iteration: %d' , snake_case__ ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , snake_case__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case__ , F'eval_results_iter-{iteration}.json' ) , os.path.join(snake_case__ , 'eval_results_best-iteration.json' ) , ) else: # Assume that the last iteration is the best logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , snake_case__ ) 
accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case__ , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(snake_case__ , 'eval_results_best-iteration.json' ) , )
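# A minimal sketch of the confidence-based pseudo-label filtering performed by
# create_pseudo_labeled_data above, run on an in-memory `datasets.Dataset`; the
# column names mirror the ones used there, but the example rows are made up.
def _example_filter_pseudo_labels():
    from datasets import Dataset

    ds = Dataset.from_dict({"prediction": [0, 1, 1], "probability": [0.4, 0.95, 0.8]})
    ds = ds.filter(lambda ex: ex["probability"] > 0.5)  # drop low-confidence rows
    ds = ds.sort("probability", reverse=True)  # most confident rows first
    ds = ds.rename_column("prediction", "label")
    return ds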
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _lowercase = logging.get_logger(__name__) _lowercase = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = '''dpt''' def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]: super().__init__(**A_ ) A = hidden_size A = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) A = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): logger.info('Initializing the config with a `BiT` backbone.' ) A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): A = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) A = backbone_featmap_shape A = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' ) else: A = None A = None A = [] A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) A = readout_type A = reassemble_factors A = neck_hidden_sizes A = fusion_hidden_size A = head_in_index A = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A = use_auxiliary_head A = auxiliary_loss_weight A = semantic_loss_ignore_index A = semantic_classifier_dropout def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: A = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A = self.backbone_config.to_dict() A = self.__class__.model_type return output
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = RoFormerTokenizer _lowerCamelCase: Any = RoFormerTokenizerFast _lowerCamelCase: Optional[int] = True _lowerCamelCase: Any = True def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: super().setUp() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,**A_ : Tuple ) -> Any: return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,**A_ : Optional[int] ) -> Dict: return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = '永和服装饰品有限公司,今天天气非常好' A = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = self.get_tokenizer() A , A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = self.get_rust_tokenizer() A , A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: pass
"""simple docstring""" from __future__ import annotations import math _lowercase = '''2020.9.26''' _lowercase = '''xcodz-dot, cclaus, dhruvmanila''' def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ): A = F'Input values must either be float or int: {list(locals().values() )}' raise TypeError(snake_case__ ) A = ((x * distance) / (z + distance)) * scale A = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ): if not isinstance(snake_case__ , snake_case__ ): raise TypeError('Axis must be a str' ) A = locals() del input_variables["axis"] if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ): A = ( 'Input values except axis must either be float or int: ' F'{list(input_variables.values() )}' ) raise TypeError(snake_case__ ) A = (angle % 360) / 450 * 180 / math.pi if axis == "z": A = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ ) A = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = z elif axis == "x": A = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ ) A = x elif axis == "y": A = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = y else: raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""") print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def _snake_case ( snake_case__ : List[str] ): A = [] for line in lines: A = re.sub(r'#.*' , '' , snake_case__ ) # remove comments if line: filtered_lines.append(snake_case__ ) A = '\n'.join(snake_case__ ) # Make a hash from all this code A = full_str.encode('utf-8' ) return shaaaa(snake_case__ ).hexdigest() # get importable module names and hash for caching _lowercase = { '''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), '''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), '''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), '''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), '''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), '''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), '''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), '''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions _lowercase = { '''.csv''': ('''csv''', {}), '''.tsv''': ('''csv''', {'''sep''': '''\t'''}), '''.json''': ('''json''', {}), '''.jsonl''': ('''json''', {}), '''.parquet''': ('''parquet''', {}), '''.arrow''': ('''arrow''', {}), '''.txt''': ('''text''', {}), } _EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _lowercase = {'''imagefolder''', '''audiofolder'''} # Used to filter data files based on extensions given a module name _lowercase = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''') _MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ,A_ : int ) -> Union[str, Any]: A = n A = [None] * self.n A = 0 # index of the first element A = 0 A = 0 def __len__( self : int ) -> int: return self.size def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.size == 0 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: return False if self.is_empty() else self.array[self.front] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int: if self.size >= self.n: raise Exception('QUEUE IS FULL' ) A = data A = (self.rear + 1) % self.n self.size += 1 return self def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: if self.size == 0: raise Exception('UNDERFLOW' ) A = self.array[self.front] A = None A = (self.front + 1) % self.n self.size -= 1 return temp
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _lowercase = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _snake_case ( snake_case__ : Union[str, Any] ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case__ ) def _snake_case ( snake_case__ : int ): from diffusers.utils.testing_utils import pytest_terminal_summary_main A = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = StableDiffusionPanoramaPipeline _lowerCamelCase: int = TEXT_TO_IMAGE_PARAMS _lowerCamelCase: List[str] = TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase: Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase: List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,) A = DDIMScheduler() torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) A = CLIPTextModel(A_ ) A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : List[Any]=0 ) -> List[str]: A = torch.manual_seed(A_ ) A = { 'prompt': 'a photo of the dolomites', 'generator': generator, # Setting height and width to None to prevent OOMs on CPU. 
'height': None, 'width': None, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = StableDiffusionPanoramaPipeline(**A_ ) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = sd_pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: super().test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=3.25e-3 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = StableDiffusionPanoramaPipeline(**A_ ) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'french fries' A = sd_pipe(**A_ ,negative_prompt=A_ ) A = output.images A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = StableDiffusionPanoramaPipeline(**A_ ) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = sd_pipe(**A_ ,view_batch_size=2 ) A = output.images A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ) A = StableDiffusionPanoramaPipeline(**A_ ) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = sd_pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,skip_prk_steps=A_ ) A = StableDiffusionPanoramaPipeline(**A_ ) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = sd_pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): 
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any]=0 ) -> int: A = torch.manual_seed(A_ ) A = { 'prompt': 'a photo of the dolomites', 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'stabilityai/stable-diffusion-2-base' A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' ) A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() A = self.get_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) A = np.array( [ 0.36_96_83_92, 0.27_02_53_72, 0.32_44_67_66, 0.28_37_93_87, 0.36_36_32_74, 0.30_73_33_47, 0.27_10_00_27, 0.27_05_41_25, 0.25_53_60_96, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = StableDiffusionPanoramaPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-base' ,safety_checker=A_ ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() A = self.get_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) A = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = 0 def callback_fn(A_ : int ,A_ : int ,A_ : torch.FloatTensor ) -> None: A = True nonlocal number_of_steps number_of_steps += 1 if step == 1: A = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) A = latents[0, -3:, -3:, -1] A = np.array( [ 0.18_68_18_69, 0.33_90_78_16, 0.5_36_12_76, 0.14_43_28_65, -0.02_85_66_11, -0.73_94_11_23, 0.23_39_79_87, 0.47_32_26_82, -0.37_82_31_64, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: A = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) A = latents[0, -3:, -3:, -1] A = np.array( [ 0.18_53_96_45, 0.33_98_72_48, 0.5_37_85_59, 0.14_43_71_42, -0.02_45_52_61, -0.7_33_83_17, 0.23_99_07_55, 0.47_35_62_72, -0.3_78_65_05, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 A = False A = 'stabilityai/stable-diffusion-2-base' A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' ) A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ ) A = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() A = self.get_inputs() pipe(**A_ ,callback=A_ ,callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A = 'stabilityai/stable-diffusion-2-base' A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' ) A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ ) A = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A = self.get_inputs() A = pipe(**A_ ) A = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 
5.5 * 10**9
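# A hedged usage sketch of the pipeline exercised by the slow tests above; the
# model id and scheduler mirror those tests, and this assumes diffusers plus a
# CUDA device with the pretrained weights available locally or via the Hub.
def _example_panorama_inference():
    from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

    model_id = "stabilityai/stable-diffusion-2-base"
    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(
        model_id, scheduler=scheduler, safety_checker=None
    ).to("cuda")
    return pipe("a photo of the dolomites", num_inference_steps=3).images[0]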
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''gpt_bigcode''' _lowerCamelCase: List[Any] = ['''past_key_values'''] _lowerCamelCase: int = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Optional[int] ,A_ : Dict=5_0257 ,A_ : Union[str, Any]=1024 ,A_ : str=768 ,A_ : Any=12 ,A_ : Any=12 ,A_ : Optional[int]=None ,A_ : Any="gelu_pytorch_tanh" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : List[str]=0.1 ,A_ : Tuple=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[str]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=5_0256 ,A_ : Union[str, Any]=5_0256 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : Dict=True ,**A_ : Union[str, Any] ,) -> Union[str, Any]: A = vocab_size A = n_positions A = n_embd A = n_layer A = n_head A = n_inner A = activation_function A = resid_pdrop A = embd_pdrop A = attn_pdrop A = layer_norm_epsilon A = initializer_range A = scale_attn_weights A = use_cache A = attention_softmax_in_fpaa A = scale_attention_softmax_in_fpaa A = multi_query A = bos_token_id A = eos_token_id super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''BlipImageProcessor''' _lowerCamelCase: Optional[Any] = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : Any ,A_ : Optional[int] ,A_ : Dict ) -> Tuple: A = False super().__init__(A_ ,A_ ) A = self.image_processor def __call__( self : List[str] ,A_ : ImageInput = None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : List[str] ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding # add pixel_values A = self.image_processor(A_ ,return_tensors=A_ ) if text is not None: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : List[Any] ,*A_ : Tuple ,**A_ : Tuple ) -> List[str]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Union[str, Any]: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _lowercase = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) _lowercase = '''sshleifer/student_marian_en_ro_6_1''' _lowercase = '''sshleifer/tiny-mbart''' @require_torch class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple: A = self.run_trainer( eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,) A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history if not do_eval: return A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] ,A_ ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: self.run_seqaseq_quick() @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : int ) -> int: self.run_seqaseq_quick(distributed=A_ ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: self.run_seqaseq_quick( distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ ) @require_apex @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) @parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } A = experiments[experiment_id] A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} A = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] ) A = len(re.findall(A_ ,cl.err ) ) self.assertEqual(A_ ,data['n_matches'] ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = self.run_trainer( eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,) # Check metrics A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] A = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] ,A_ ) # test if do_predict saves generations and metrics A = os.listdir(A_ ) A = {os.path.basename(A_ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: from transformers.training_args import OptimizerNames def train_and_return_metrics(A_ : str ) -> Tuple[int, float]: A = '--skip_memory_metrics 0' A = self.run_trainer( max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,) # Check metrics A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) A = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A = 
train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A = gpu_peak_mem_orig + gpu_alloc_mem_orig A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and' F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,) self.assertGreater( A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and' F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,) self.assertEqual( A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict: A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' A = self.get_auto_remove_tmp_dir() A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split() A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split() A = '\n --do_predict\n '.split() A = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'--optim {optim}'.split() if extra_args_str is not None: args += 
extra_args_str.split() if distributed: if n_gpus_to_use is None: A = get_gpu_count() A = get_torch_dist_unique_port() A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split() A = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A_ ,env=self.get_env() ) else: A = ['run_translation.py'] + args with patch.object(A_ ,'argv' ,A_ ): main() return output_dir
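# A worked version of the memory estimate in the comments above: roughly 25M
# quantizable parameters held as 8-byte Adam state versus 2-byte 8-bit state
# gives the ~150MB saving the assertions leave a margin for.
def _example_bnb_savings(n_params: int = 25_000_000) -> float:
    bytes_adamw, bytes_bnb = 8, 2
    return n_params * (bytes_adamw - bytes_bnb) / 2**20  # about 143 MiB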
"""simple docstring""" import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 _lowercase = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') _lowercase = get_tests_dir('''fixtures/vocab.json''') _lowercase = get_tests_dir('''fixtures''') class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = 0 def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: with tempfile.TemporaryDirectory() as tmpdirname: A = WavaVecaConfig() A = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' ) # save in new folder model_config.save_pretrained(A_ ) processor.save_pretrained(A_ ) A = AutoProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(A_ ,os.path.join(A_ ,A_ ) ) copyfile(A_ ,os.path.join(A_ ,'vocab.json' ) ) A = AutoProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: with tempfile.TemporaryDirectory() as tmpdirname: A = WavaVecaFeatureExtractor() A = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' ) A = WavaVecaProcessor(A_ ,A_ ) # save in new folder processor.save_pretrained(A_ ) # drop `processor_class` in tokenizer with open(os.path.join(A_ ,A_ ) ,'r' ) as f: A = json.load(A_ ) config_dict.pop('processor_class' ) with open(os.path.join(A_ ,A_ ) ,'w' ) as f: f.write(json.dumps(A_ ) ) A = AutoProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: A = WavaVecaFeatureExtractor() A = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' ) A = WavaVecaProcessor(A_ ,A_ ) # save in new folder processor.save_pretrained(A_ ) # drop `processor_class` in feature extractor with open(os.path.join(A_ ,A_ ) ,'r' ) as f: A = json.load(A_ ) config_dict.pop('processor_class' ) with open(os.path.join(A_ ,A_ ) ,'w' ) as f: f.write(json.dumps(A_ ) ) A = AutoProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> 
List[Any]: with tempfile.TemporaryDirectory() as tmpdirname: A = WavaVecaConfig(processor_class='Wav2Vec2Processor' ) model_config.save_pretrained(A_ ) # copy relevant files copyfile(A_ ,os.path.join(A_ ,'vocab.json' ) ) # create empty sample processor with open(os.path.join(A_ ,A_ ) ,'w' ) as f: f.write('{}' ) A = AutoProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(A_ ): A = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ) # If remote code is disabled, we can't load this config. with self.assertRaises(A_ ): A = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ ) A = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ ,'NewProcessor' ) A = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' ) A = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' ) # Test we can also load the slow version A = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ ,use_fast=A_ ) A = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ ,'NewTokenizer' ) else: self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: try: AutoConfig.register('custom' ,A_ ) AutoFeatureExtractor.register(A_ ,A_ ) AutoTokenizer.register(A_ ,slow_tokenizer_class=A_ ) AutoProcessor.register(A_ ,A_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A_ ): AutoProcessor.register(A_ ,A_ ) # Now that the config is registered, it can be used as any other config with the auto-API A = CustomFeatureExtractor.from_pretrained(A_ ) with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(A_ ,'vocab.txt' ) with open(A_ ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) A = CustomTokenizer(A_ ) A = CustomProcessor(A_ ,A_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(A_ ) A = AutoProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = False class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: str = False class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = '''AutoFeatureExtractor''' _lowerCamelCase: Tuple = '''AutoTokenizer''' _lowerCamelCase: Optional[Any] = 
False try: AutoConfig.register('custom' ,A_ ) AutoFeatureExtractor.register(A_ ,A_ ) AutoTokenizer.register(A_ ,slow_tokenizer_class=A_ ) AutoProcessor.register(A_ ,A_ ) # If remote code is not set, the default is to use local classes. A = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ) self.assertEqual(processor.__class__.__name__ ,'NewProcessor' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ ) self.assertEqual(processor.__class__.__name__ ,'NewProcessor' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ ) self.assertEqual(processor.__class__.__name__ ,'NewProcessor' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(processor.__class__.__name__ ,'BertTokenizerFast' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' ) self.assertEqual(processor.__class__.__name__ ,'ConvNextImageProcessor' ) @is_staging_test class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ) -> Optional[Any]: A = TOKEN HfFolder.save_token(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ) -> Union[str, Any]: try: delete_repo(token=cls._token ,repo_id='test-processor' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='valid_org/test-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='test-dynamic-processor' ) except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = WavaVecaProcessor.from_pretrained(A_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(A_ ,'test-processor' ) ,push_to_hub=A_ ,use_auth_token=self._token ) A = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(A_ ,getattr(new_processor.feature_extractor ,A_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = 
WavaVecaProcessor.from_pretrained(A_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(A_ ,'test-processor-org' ) ,push_to_hub=A_ ,use_auth_token=self._token ,organization='valid_org' ,) A = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(A_ ,getattr(new_processor.feature_extractor ,A_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A = CustomFeatureExtractor.from_pretrained(A_ ) with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(A_ ,'vocab.txt' ) with open(A_ ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) A = CustomTokenizer(A_ ) A = CustomProcessor(A_ ,A_ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' ,token=self._token ) A = Repository(A_ ,clone_from=F'{USER}/test-dynamic-processor' ,token=self._token ) processor.save_pretrained(A_ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map ,{ 'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor', 'AutoProcessor': 'custom_processing.CustomProcessor', } ,) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(A_ ,'tokenizer_config.json' ) ) as f: A = json.load(A_ ) self.assertDictEqual( tokenizer_config['auto_map'] ,{ 'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None], 'AutoProcessor': 'custom_processing.CustomProcessor', } ,) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(A_ ,'custom_feature_extraction.py' ) ) ) self.assertTrue(os.path.isfile(os.path.join(A_ ,'custom_tokenization.py' ) ) ) self.assertTrue(os.path.isfile(os.path.join(A_ ,'custom_processing.py' ) ) ) repo.push_to_hub() A = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' ,trust_remote_code=A_ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ ,'CustomProcessor' )
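# A minimal registration sketch distilled from the tests above, assuming the
# public `transformers` auto-API (AutoConfig.register and friends, as exercised
# in the tests). The My* classes are hypothetical stand-ins for user-defined
# subclasses, not names from this repository.
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
    ProcessorMixin,
)
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyFeatureExtractor(FeatureExtractionMixin):
    pass


class MyTokenizer(PreTrainedTokenizer):
    pass


class MyProcessor(ProcessorMixin):
    feature_extractor_class = "AutoFeatureExtractor"
    tokenizer_class = "AutoTokenizer"


# Once registered, AutoProcessor.from_pretrained can resolve MyConfig checkpoints.
AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
AutoProcessor.register(MyConfig, MyProcessor)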
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''deit''' def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = encoder_stride class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: int = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float: return 1e-4
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder _lowercase = '''base_with_context''' def _snake_case ( snake_case__ : int , snake_case__ : Tuple ): A = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) A = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case__ ) for lyr_num, lyr in enumerate(model.encoders ): A = weights[F'layers_{lyr_num}'] A = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) A = ly_weight['attention'] A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def _snake_case ( snake_case__ : Dict , snake_case__ : List[Any] ): A = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) A = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case__ ) for lyr_num, lyr in enumerate(model.encoders ): A = weights[F'layers_{lyr_num}'] A = ly_weight['attention'] A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) A = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) A = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[Any] ): A = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) A = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case__ ) A = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): A = weights[F'layers_{lyr_num}'] A = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) A = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) A = ly_weight['self_attention'] A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) A = 
nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) A = ly_weight['MultiHeadDotProductAttention_0'] A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) A = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) A = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) A = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) A = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def _snake_case ( snake_case__ : Dict ): A = checkpoints.load_tax_checkpoint(args.checkpoint_path ) A = jnp.tree_util.tree_map(onp.array , snake_case__ ) A = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] A = os.path.join(args.checkpoint_path , '..' , 'config.gin' ) A = inference.parse_training_gin_file(snake_case__ , snake_case__ ) A = inference.InferenceModel(args.checkpoint_path , snake_case__ ) A = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) A = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) A = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) A = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) A = 
load_notes_encoder(ta_checkpoint['target']['token_encoder'] , snake_case__ ) A = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , snake_case__ ) A = load_decoder(ta_checkpoint['target']['decoder'] , snake_case__ ) A = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) A = SpectrogramDiffusionPipeline( notes_encoder=snake_case__ , continuous_encoder=snake_case__ , decoder=snake_case__ , scheduler=snake_case__ , melgan=snake_case__ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument( '''--checkpoint_path''', default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help='''Path to the original jax model checkpoint.''', ) _lowercase = parser.parse_args() main(args)
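# Why every Flax kernel above is copied with `.T`: Flax dense kernels are laid
# out as (in_features, out_features) while torch.nn.Linear stores its weight as
# (out_features, in_features). A tiny illustration with made-up shapes:
import numpy as np
import torch
import torch.nn as nn

flax_kernel = np.random.randn(16, 32).astype(np.float32)  # (in, out)
linear = nn.Linear(16, 32, bias=False)  # weight is (out, in) = (32, 16)
linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))
assert tuple(linear.weight.shape) == (32, 16)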
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ : Dict ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) A = [] for i in range(snake_case__ ): A = i / num_diffusion_timesteps A = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) ) return torch.tensor(snake_case__ , dtype=torch.floataa ) class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers] _lowerCamelCase: Optional[Any] = 2 @register_to_config def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]: if trained_betas is not None: A = torch.tensor(A_ ,dtype=torch.floataa ) elif beta_schedule == "linear": A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. A = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' ) elif beta_schedule == "exp": A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) A = 1.0 - self.betas A = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(A_ ,A_ ,A_ ) A = use_karras_sigmas def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple: if schedule_timesteps is None: A = self.timesteps A = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: A = 1 if len(A_ ) > 1 else 0 else: A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep A = self._index_counter[timestep_int] return indices[pos].item() @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor: A = self.index_for_timestep(A_ ) A = self.sigmas[step_index] A = sample / ((sigma**2 + 1) ** 0.5) return sample def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]: A = num_inference_steps A = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy() elif self.config.timestep_spacing == "leading": A = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": A = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ ) timesteps -= 1 else: raise ValueError( F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' 
) A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) A = np.log(A_ ) A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ ) if self.config.use_karras_sigmas: A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps ) A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] ) A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) A = torch.from_numpy(A_ ).to(device=A_ ) A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) A = torch.from_numpy(A_ ) A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(A_ ).startswith('mps' ): # mps does not support float64 A = timesteps.to(A_ ,dtype=torch.floataa ) else: A = timesteps.to(device=A_ ) # empty dt and derivative A = None A = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter A = defaultdict(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict: # get log sigma A = np.log(A_ ) # get distribution A = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) A = low_idx + 1 A = log_sigmas[low_idx] A = log_sigmas[high_idx] # interpolate sigmas A = (low - log_sigma) / (low - high) A = np.clip(A_ ,0 ,1 ) # transform interpolation to time range A = (1 - w) * low_idx + w * high_idx A = t.reshape(sigma.shape ) return t def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor: A = in_sigmas[-1].item() A = in_sigmas[0].item() A = 7.0 # 7.0 is the value used in the paper A = np.linspace(0 ,1 ,A_ ) A = sigma_min ** (1 / rho) A = sigma_max ** (1 / rho) A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.dt is None def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]: A = self.index_for_timestep(A_ ) # advance index counter by 1 A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: A = self.sigmas[step_index] A = self.sigmas[step_index + 1] else: # 2nd order / Heun's method A = self.sigmas[step_index - 1] A = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API A = 0 A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": A = sigma_hat if self.state_in_first_order else sigma_next A = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": A = sigma_hat if self.state_in_first_order else sigma_next A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": A = model_output else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.config.clip_sample: A = pred_original_sample.clamp( -self.config.clip_sample_range ,self.config.clip_sample_range ) if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order A = (sample - pred_original_sample) / sigma_hat # 3. delta timestep A = sigma_next - sigma_hat # store for 2nd order step A = derivative A = dt A = sample else: # 2. 2nd order / Heun's method A = (sample - pred_original_sample) / sigma_next A = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample A = self.dt A = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" A = None A = None A = None A = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A_ ): # mps does not support float64 A = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) A = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: A = self.timesteps.to(original_samples.device ) A = timesteps.to(original_samples.device ) A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps] A = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): A = sigma.unsqueeze(-1 ) A = original_samples + noise * sigma return noisy_samples def __len__( self : Dict ) -> int: return self.config.num_train_timesteps
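# A hedged sketch of how this kind of scheduler is normally driven — the
# standard diffusers sampling loop. `HeunDiscreteScheduler` is the upstream
# class this file appears to mirror (note the "Heun's method" comments above),
# and the zero tensor is a placeholder for a real UNet call.
import torch
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=25)

sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample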
"""simple docstring""" def _snake_case ( snake_case__ : int = 200 ): A = [1, 2, 5, 10, 20, 50, 100, 200] A = [0] * (pence + 1) A = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(snake_case__ , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(2_00) == 7_36_82
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : list[int] ) -> None: A = len(A_ ) A = [0] * len_array if len_array > 0: A = array[0] for i in range(1 ,A_ ): A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool: A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(A_ ) return False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml _lowercase = NewType('''DataClass''', Any) _lowercase = NewType('''DataClassType''', Any) def _snake_case ( snake_case__ : Tuple ): if isinstance(snake_case__ , snake_case__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def _snake_case ( snake_case__ : list ): A = {str(snake_case__ ): choice for choice in choices} return lambda snake_case__ : str_to_choice.get(snake_case__ , snake_case__ ) def _snake_case ( *, snake_case__ : Union[str, List[str]] = None , snake_case__ : str = None , snake_case__ : Any = dataclasses.MISSING , snake_case__ : Callable[[], Any] = dataclasses.MISSING , snake_case__ : dict = None , **snake_case__ : Any , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls A = {} if aliases is not None: A = aliases if help is not None: A = help return dataclasses.field(metadata=snake_case__ , default=snake_case__ , default_factory=snake_case__ , **snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Iterable[DataClassType] def __init__( self : List[str] ,A_ : Union[DataClassType, Iterable[DataClassType]] ,**A_ : Any ) -> Optional[int]: # To make the default appear when using --help if "formatter_class" not in kwargs: A = ArgumentDefaultsHelpFormatter super().__init__(**A_ ) if dataclasses.is_dataclass(A_ ): A = [dataclass_types] A = list(A_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ,A_ : dataclasses.Field ) -> Optional[Any]: A = F'--{field.name}' A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A_ ): raise RuntimeError( 'Unresolved type detected, which should have been done with the help of ' '`typing.get_type_hints` method by default' ) A = kwargs.pop('aliases' ,[] ) if isinstance(A_ ,A_ ): A = [aliases] A = getattr(field.type ,'__origin__' ,field.type ) if origin_type is Union or (hasattr(A_ ,'UnionType' ) and isinstance(A_ ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A_ ) not in field.type.__args__ ): raise ValueError( 'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because' ' the argument parser only supports one type per argument.' F' Problem encountered in field \'{field.name}\'.' 
) if type(A_ ) not in field.type.__args__: # filter `str` in Union A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] A = getattr(field.type ,'__origin__' ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) A = ( field.type.__args__[0] if isinstance(A_ ,field.type.__args__[1] ) else field.type.__args__[1] ) A = getattr(field.type ,'__origin__' ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) A = {} if origin_type is Literal or (isinstance(field.type ,A_ ) and issubclass(field.type ,A_ )): if origin_type is Literal: A = field.type.__args__ else: A = [x.value for x in field.type] A = make_choice_type_function(kwargs['choices'] ) if field.default is not dataclasses.MISSING: A = field.default else: A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument A = copy(A_ ) # Hack because type=bool in argparse does not behave as we want. A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way A = default # This tells argparse we accept 0 or 1 value after --field_name A = '?' # This is the value that will get picked if we do --field_name (without value) A = True elif isclass(A_ ) and issubclass(A_ ,A_ ): A = field.type.__args__[0] A = '+' if field.default_factory is not dataclasses.MISSING: A = field.default_factory() elif field.default is dataclasses.MISSING: A = True else: A = field.type if field.default is not dataclasses.MISSING: A = field.default elif field.default_factory is not dataclasses.MISSING: A = field.default_factory() else: A = True parser.add_argument(A_ ,*A_ ,**A_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): A = False parser.add_argument(F'--no_{field.name}' ,action='store_false' ,dest=field.name ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DataClassType ) -> List[Any]: if hasattr(A_ ,'_argument_group_name' ): A = self.add_argument_group(dtype._argument_group_name ) else: A = self try: A = get_type_hints(A_ ) except NameError: raise RuntimeError( F'Type resolution failed for {dtype}. Try declaring the class in global scope or ' 'removing line of `from __future__ import annotations` which opts in Postponed ' 'Evaluation of Annotations (PEP 563)' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A_ ): A = '.'.join(map(A_ ,sys.version_info[:3] ) ) raise RuntimeError( F'Type resolution failed for {dtype} on Python {python_version}. 
Try removing ' 'line of `from __future__ import annotations` which opts in union types as ' '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ' 'support Python versions that lower than 3.10, you need to use ' '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ' '`X | None`.' ) from ex raise for field in dataclasses.fields(A_ ): if not field.init: continue A = type_hints[field.name] self._parse_dataclass_field(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any=None ,A_ : int=False ,A_ : Any=True ,A_ : List[str]=None ,A_ : Union[str, Any]=None ,) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): A = [] if args_filename: args_files.append(Path(A_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values A = ArgumentParser() args_file_parser.add_argument(A_ ,type=A_ ,action='append' ) # Use only remaining args for further parsing (remove the args_file_flag) A , A = args_file_parser.parse_known_args(args=A_ ) A = vars(A_ ).get(args_file_flag.lstrip('-' ) ,A_ ) if cmd_args_file_paths: args_files.extend([Path(A_ ) for p in cmd_args_file_paths] ) A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last A = file_args + args if args is not None else file_args + sys.argv[1:] A , A = self.parse_known_args(args=A_ ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in vars(A_ ).items() if k in keys} for k in keys: delattr(A_ ,A_ ) A = dtype(**A_ ) outputs.append(A_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' ) return (*outputs,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict[str, Any] ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = set(args.keys() ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) A = dtype(**A_ ) outputs.append(A_ ) if not allow_extra_keys and unused_keys: raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(A_ )}' ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: with open(Path(A_ ) ,encoding='utf-8' ) as open_json_file: A = json.loads(open_json_file.read() ) A = self.parse_dict(A_ ,allow_extra_keys=A_ ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = self.parse_dict(yaml.safe_load(Path(A_ ).read_text() ) ,allow_extra_keys=A_ ) return tuple(A_ )
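# A minimal usage sketch, assuming the public `transformers.HfArgumentParser`,
# which the aliased class above appears to re-implement; `TrainArgs` is a
# hypothetical dataclass, not a name from this repository.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainArgs:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Use mixed precision."})


(train_args,) = HfArgumentParser(TrainArgs).parse_args_into_dataclasses(
    args=["--learning_rate", "1e-4", "--fp16"]
)
assert train_args.learning_rate == 1e-4 and train_args.fp16 is True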
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = RobertaPreLayerNormConfig.from_pretrained( snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) ) A = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue A = tensor_value A = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) model.save_pretrained(snake_case__ ) # convert tokenizer A = AutoTokenizer.from_pretrained(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = '''vivit''' def __init__( self : str ,A_ : List[str]=224 ,A_ : Union[str, Any]=32 ,A_ : List[str]=[2, 16, 16] ,A_ : Any=3 ,A_ : int=768 ,A_ : Optional[int]=12 ,A_ : int=12 ,A_ : Any=3072 ,A_ : Union[str, Any]="gelu_fast" ,A_ : Any=0.0 ,A_ : Dict=0.0 ,A_ : Optional[int]=0.02 ,A_ : Union[str, Any]=1e-06 ,A_ : Union[str, Any]=True ,**A_ : List[Any] ,) -> Dict: A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**A_ )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''roformer''' def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size if embedding_size is None else embedding_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = rotary_value A = use_cache class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowercase = logging.get_logger(__name__) _lowercase = { '''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''', # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = '''dinat''' _lowerCamelCase: str = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Union[str, Any] ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=3 ,A_ : List[str]=64 ,A_ : List[str]=[3, 4, 6, 5] ,A_ : Any=[2, 4, 8, 16] ,A_ : List[Any]=7 ,A_ : Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] ,A_ : str=3.0 ,A_ : List[Any]=True ,A_ : Any=0.0 ,A_ : str=0.0 ,A_ : Union[str, Any]=0.1 ,A_ : int="gelu" ,A_ : List[str]=0.02 ,A_ : Optional[Any]=1e-5 ,A_ : Dict=0.0 ,A_ : int=None ,A_ : Optional[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]: super().__init__(**A_ ) A = patch_size A = num_channels A = embed_dim A = depths A = len(A_ ) A = num_heads A = kernel_size A = dilations A = mlp_ratio A = qkv_bias A = hidden_dropout_prob A = attention_probs_dropout_prob A = drop_path_rate A = hidden_act A = layer_norm_eps A = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A = int(embed_dim * 2 ** (len(A_ ) - 1) ) A = layer_scale_init_value A = ['stem'] + [F'stage{idx}' for idx in range(1 ,len(A_ ) + 1 )] A , A = get_aligned_output_features_output_indices( out_features=A_ ,out_indices=A_ ,stage_names=self.stage_names )
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : Dict ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring""" from sklearn.metrics import recall_score import datasets _lowercase = ''' Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. ''' _lowercase = ''' Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric(\'recall\') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {\'recall\': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric(\'recall\') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {\'recall\': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric(\'recall\') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {\'recall\': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric(\'recall\') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'recall\': array([1., 0., 0.])} ''' _lowercase = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ,A_ : List[Any] ,A_ : Optional[int]=None ,A_ : Optional[int]=1 ,A_ : Tuple="binary" ,A_ : Union[str, Any]=None ,A_ : str="warn" ,) -> Dict: A = recall_score( A_ ,A_ ,labels=A_ ,pos_label=A_ ,average=A_ ,sample_weight=A_ ,zero_division=A_ ,) return {"recall": float(A_ ) if score.size == 1 else score}
"""simple docstring""" import argparse import struct import unittest class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : bytes ) -> None: A = data # Initialize hash values A = [ 0X6_A_0_9_E_6_6_7, 0XB_B_6_7_A_E_8_5, 0X3_C_6_E_F_3_7_2, 0XA_5_4_F_F_5_3_A, 0X5_1_0_E_5_2_7_F, 0X9_B_0_5_6_8_8_C, 0X1_F_8_3_D_9_A_B, 0X5_B_E_0_C_D_1_9, ] # Initialize round constants A = [ 0X4_2_8_A_2_F_9_8, 0X7_1_3_7_4_4_9_1, 0XB_5_C_0_F_B_C_F, 0XE_9_B_5_D_B_A_5, 0X3_9_5_6_C_2_5_B, 0X5_9_F_1_1_1_F_1, 0X9_2_3_F_8_2_A_4, 0XA_B_1_C_5_E_D_5, 0XD_8_0_7_A_A_9_8, 0X1_2_8_3_5_B_0_1, 0X2_4_3_1_8_5_B_E, 0X5_5_0_C_7_D_C_3, 0X7_2_B_E_5_D_7_4, 0X8_0_D_E_B_1_F_E, 0X9_B_D_C_0_6_A_7, 0XC_1_9_B_F_1_7_4, 0XE_4_9_B_6_9_C_1, 0XE_F_B_E_4_7_8_6, 0X0_F_C_1_9_D_C_6, 0X2_4_0_C_A_1_C_C, 0X2_D_E_9_2_C_6_F, 0X4_A_7_4_8_4_A_A, 0X5_C_B_0_A_9_D_C, 0X7_6_F_9_8_8_D_A, 0X9_8_3_E_5_1_5_2, 0XA_8_3_1_C_6_6_D, 0XB_0_0_3_2_7_C_8, 0XB_F_5_9_7_F_C_7, 0XC_6_E_0_0_B_F_3, 0XD_5_A_7_9_1_4_7, 0X0_6_C_A_6_3_5_1, 0X1_4_2_9_2_9_6_7, 0X2_7_B_7_0_A_8_5, 0X2_E_1_B_2_1_3_8, 0X4_D_2_C_6_D_F_C, 0X5_3_3_8_0_D_1_3, 0X6_5_0_A_7_3_5_4, 0X7_6_6_A_0_A_B_B, 0X8_1_C_2_C_9_2_E, 0X9_2_7_2_2_C_8_5, 0XA_2_B_F_E_8_A_1, 0XA_8_1_A_6_6_4_B, 0XC_2_4_B_8_B_7_0, 0XC_7_6_C_5_1_A_3, 0XD_1_9_2_E_8_1_9, 0XD_6_9_9_0_6_2_4, 0XF_4_0_E_3_5_8_5, 0X1_0_6_A_A_0_7_0, 0X1_9_A_4_C_1_1_6, 0X1_E_3_7_6_C_0_8, 0X2_7_4_8_7_7_4_C, 0X3_4_B_0_B_C_B_5, 0X3_9_1_C_0_C_B_3, 0X4_E_D_8_A_A_4_A, 0X5_B_9_C_C_A_4_F, 0X6_8_2_E_6_F_F_3, 0X7_4_8_F_8_2_E_E, 0X7_8_A_5_6_3_6_F, 0X8_4_C_8_7_8_1_4, 0X8_C_C_7_0_2_0_8, 0X9_0_B_E_F_F_F_A, 0XA_4_5_0_6_C_E_B, 0XB_E_F_9_A_3_F_7, 0XC_6_7_1_7_8_F_2, ] A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes: A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64)) A = struct.pack('>Q' ,(len(A_ ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None: # Convert into blocks of 64 bytes A = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A = list(struct.unpack('>16L' ,A_ ) ) # add 48 0-ed integers words += [0] * 48 A , A , A , A , A , A , A , A = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array A = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) A = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X1_0_0_0_0_0_0_0_0 # Compression A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 ) A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g) A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X1_0_0_0_0_0_0_0_0 A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 ) A = (a & b) ^ (a & c) ^ (b & c) A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0 A , A , A , A , A , A , A , A = ( g, f, e, ((d + tempa) % 0X1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0), ) A = [a, b, c, d, e, f, g, h] # Modify final values A = [ ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int: return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class lowerCAmelCase_ ( unittest.TestCase ): '''simple 
docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None: import hashlib A = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() ) def _snake_case ( ): import doctest doctest.testmod() A = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) A = parser.parse_args() A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: A = f.read() else: A = bytes(snake_case__ , 'utf-8' ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
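# Worked example of the preprocessing step above: an 11-byte message gets one
# 0x80 byte, zero padding, and an 8-byte big-endian bit length, reaching a
# multiple of 64 bytes (exactly one block here).
import struct

data = b"Test String"  # 11 bytes
padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)  # 1 + 44 bytes
padded = data + padding + struct.pack(">Q", len(data) * 8)
assert len(padded) == 64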
"""simple docstring""" import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : int ): if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , snake_case__ ) A = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A = dataset_size < in_memory_max_size else: A = False A = is_small_dataset(snake_case__ ) assert result == expected
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''DeiTFeatureExtractor'''] _lowercase = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : str ): return [ord(snake_case__ ) - 96 for elem in plain] def _snake_case ( snake_case__ : list[int] ): return "".join(chr(elem + 96 ) for elem in encoded ) def _snake_case ( ): A = encode(input('-> ' ).strip().lower() ) print('Encoded: ' , snake_case__ ) print('Decoded:' , decode(snake_case__ ) ) if __name__ == "__main__": main()
"""simple docstring""" from __future__ import annotations import requests def _snake_case ( snake_case__ : str ): A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(snake_case__ ).json() def _snake_case ( snake_case__ : int = 10 ): A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' A = requests.get(snake_case__ ).json()[:max_stories] return [get_hackernews_story(snake_case__ ) for story_id in story_ids] def _snake_case ( snake_case__ : int = 10 ): A = hackernews_top_stories(snake_case__ ) return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
"""simple docstring""" from math import factorial def _snake_case ( snake_case__ : int = 100 ): return sum(map(snake_case__ , str(factorial(snake_case__ ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
"""simple docstring""" from string import ascii_uppercase _lowercase = {char: i for i, char in enumerate(ascii_uppercase)} _lowercase = dict(enumerate(ascii_uppercase)) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = len(snake_case__ ) A = 0 while True: if x == i: A = 0 if len(snake_case__ ) == len(snake_case__ ): break key += key[i] i += 1 return key def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in message: if letter == " ": cipher_text += " " else: A = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: A = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _snake_case ( ): A = 'THE GERMAN ATTACK' A = 'SECRET' A = generate_key(snake_case__ , snake_case__ ) A = cipher_text(snake_case__ , snake_case__ ) print(F'Encrypted Text = {s}' ) print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = RobertaPreLayerNormConfig.from_pretrained( snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) ) A = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue A = tensor_value A = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) model.save_pretrained(snake_case__ ) # convert tokenizer A = AutoTokenizer.from_pretrained(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): A = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = 'sgugger/tiny-distilbert-classification' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) # set architectures equal to `None` A = None A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ 
,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) benchmark.run() self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: A = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(A_ : Optional[int] ): self.assertTrue(hasattr(A_ ,'sequential' ) ) self.assertTrue(hasattr(A_ ,'cumulative' ) ) self.assertTrue(hasattr(A_ ,'current' ) ) self.assertTrue(hasattr(A_ ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ 
,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() )
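The same benchmark objects can be driven outside the test suite. A minimal sketch, assuming a transformers version that still ships these (since-deprecated) benchmark utilities; the tiny model id is the one the tests themselves use:

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=['sshleifer/tiny-gpt2'],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)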
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": _lowercase = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') _lowercase = F"""https://www.google.com/search?q={query}&num=100""" _lowercase = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: _lowercase = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: _lowercase = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: A , A , A = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[int] ) -> Dict: return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return self.major, self.minor, self.patch def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]: if isinstance(A_ ,A_ ): return Version(A_ ) elif isinstance(A_ ,A_ ): return other raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' ) def __eq__( self : List[Any] ,A_ : Dict ) -> Any: try: A = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple: A = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : Union[str, Any] ) -> Union[str, Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]: A = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.version_str def _snake_case ( snake_case__ : List[str] ): A = _VERSION_REG.match(snake_case__ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def _snake_case ( snake_case__ : str ): return ".".join(str(snake_case__ ) for v in version_tuple )
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : int ,A_ : Dict ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : 
Union[str, Any] ,A_ : int ,A_ : str ,) -> Any: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ,A_ : str ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : str ,A_ : Any ,A_ : str ,A_ : Dict ,) -> Dict: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Any ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,) -> int: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Optional[int] ,A_ : Any ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ,) -> List[Any]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ,A_ : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> Optional[int]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int] ,) -> List[str]: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : List[Any] ,) -> List[str]: A = 
self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[int] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ) -> Any: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[int] ,A_ : List[Any]=False ) -> int: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Any ,A_ : str ,A_ : Tuple ,A_ : Any ,A_ : Any=False ,A_ : Any=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ,A_ : int ,A_ : Any ,A_ : str=False ,A_ : Any=1 ) -> Tuple: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
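The slow integration test above pins down the expected call pattern, which also works standalone. A hedged sketch using the `xlm-mlm-en-2048` checkpoint and the `[[14, 447]]` ("the president") prompt, both taken from the test itself; as the test's TODO notes, greedy output quality is poor for this model:

import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())  # expected to loop on [14, 447, 14, 447, ...]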
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml _lowercase = NewType('''DataClass''', Any) _lowercase = NewType('''DataClassType''', Any) def _snake_case ( snake_case__ : Tuple ): if isinstance(snake_case__ , snake_case__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def _snake_case ( snake_case__ : list ): A = {str(snake_case__ ): choice for choice in choices} return lambda snake_case__ : str_to_choice.get(snake_case__ , snake_case__ ) def _snake_case ( *, snake_case__ : Union[str, List[str]] = None , snake_case__ : str = None , snake_case__ : Any = dataclasses.MISSING , snake_case__ : Callable[[], Any] = dataclasses.MISSING , snake_case__ : dict = None , **snake_case__ : Any , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls A = {} if aliases is not None: A = aliases if help is not None: A = help return dataclasses.field(metadata=snake_case__ , default=snake_case__ , default_factory=snake_case__ , **snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Iterable[DataClassType] def __init__( self : List[str] ,A_ : Union[DataClassType, Iterable[DataClassType]] ,**A_ : Any ) -> Optional[int]: # To make the default appear when using --help if "formatter_class" not in kwargs: A = ArgumentDefaultsHelpFormatter super().__init__(**A_ ) if dataclasses.is_dataclass(A_ ): A = [dataclass_types] A = list(A_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ,A_ : dataclasses.Field ) -> Optional[Any]: A = F'--{field.name}' A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A_ ): raise RuntimeError( 'Unresolved type detected, which should have been done with the help of ' '`typing.get_type_hints` method by default' ) A = kwargs.pop('aliases' ,[] ) if isinstance(A_ ,A_ ): A = [aliases] A = getattr(field.type ,'__origin__' ,field.type ) if origin_type is Union or (hasattr(A_ ,'UnionType' ) and isinstance(A_ ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A_ ) not in field.type.__args__ ): raise ValueError( 'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because' ' the argument parser only supports one type per argument.' F' Problem encountered in field \'{field.name}\'.' 
) if type(A_ ) not in field.type.__args__: # filter `str` in Union A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] A = getattr(field.type ,'__origin__' ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) A = ( field.type.__args__[0] if isinstance(A_ ,field.type.__args__[1] ) else field.type.__args__[1] ) A = getattr(field.type ,'__origin__' ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) A = {} if origin_type is Literal or (isinstance(field.type ,A_ ) and issubclass(field.type ,A_ )): if origin_type is Literal: A = field.type.__args__ else: A = [x.value for x in field.type] A = make_choice_type_function(kwargs['choices'] ) if field.default is not dataclasses.MISSING: A = field.default else: A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument A = copy(A_ ) # Hack because type=bool in argparse does not behave as we want. A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way A = default # This tells argparse we accept 0 or 1 value after --field_name A = '?' # This is the value that will get picked if we do --field_name (without value) A = True elif isclass(A_ ) and issubclass(A_ ,A_ ): A = field.type.__args__[0] A = '+' if field.default_factory is not dataclasses.MISSING: A = field.default_factory() elif field.default is dataclasses.MISSING: A = True else: A = field.type if field.default is not dataclasses.MISSING: A = field.default elif field.default_factory is not dataclasses.MISSING: A = field.default_factory() else: A = True parser.add_argument(A_ ,*A_ ,**A_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): A = False parser.add_argument(F'--no_{field.name}' ,action='store_false' ,dest=field.name ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DataClassType ) -> List[Any]: if hasattr(A_ ,'_argument_group_name' ): A = self.add_argument_group(dtype._argument_group_name ) else: A = self try: A = get_type_hints(A_ ) except NameError: raise RuntimeError( F'Type resolution failed for {dtype}. Try declaring the class in global scope or ' 'removing line of `from __future__ import annotations` which opts in Postponed ' 'Evaluation of Annotations (PEP 563)' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A_ ): A = '.'.join(map(A_ ,sys.version_info[:3] ) ) raise RuntimeError( F'Type resolution failed for {dtype} on Python {python_version}. 
Try removing ' 'line of `from __future__ import annotations` which opts in union types as ' '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ' 'support Python versions that lower than 3.10, you need to use ' '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ' '`X | None`.' ) from ex raise for field in dataclasses.fields(A_ ): if not field.init: continue A = type_hints[field.name] self._parse_dataclass_field(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any=None ,A_ : int=False ,A_ : Any=True ,A_ : List[str]=None ,A_ : Union[str, Any]=None ,) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): A = [] if args_filename: args_files.append(Path(A_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values A = ArgumentParser() args_file_parser.add_argument(A_ ,type=A_ ,action='append' ) # Use only remaining args for further parsing (remove the args_file_flag) A , A = args_file_parser.parse_known_args(args=A_ ) A = vars(A_ ).get(args_file_flag.lstrip('-' ) ,A_ ) if cmd_args_file_paths: args_files.extend([Path(A_ ) for p in cmd_args_file_paths] ) A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last A = file_args + args if args is not None else file_args + sys.argv[1:] A , A = self.parse_known_args(args=A_ ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in vars(A_ ).items() if k in keys} for k in keys: delattr(A_ ,A_ ) A = dtype(**A_ ) outputs.append(A_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' ) return (*outputs,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict[str, Any] ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = set(args.keys() ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) A = dtype(**A_ ) outputs.append(A_ ) if not allow_extra_keys and unused_keys: raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(A_ )}' ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: with open(Path(A_ ) ,encoding='utf-8' ) as open_json_file: A = json.loads(open_json_file.read() ) A = self.parse_dict(A_ ,allow_extra_keys=A_ ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = self.parse_dict(yaml.safe_load(Path(A_ ).read_text() ) ,allow_extra_keys=A_ ) return tuple(A_ )
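A minimal end-to-end sketch of the parser above, assuming the class and methods keep their upstream names (`HfArgumentParser`, `parse_args_into_dataclasses`); the dataclass and flag names are illustrative, not from the source:

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArgs:
    learning_rate: float = field(default=3e-4, metadata={'help': 'Peak learning rate.'})
    do_eval: bool = field(default=False, metadata={'help': 'Run evaluation.'})
    run_name: Optional[str] = None


parser = HfArgumentParser(TrainingArgs)
(training_args,) = parser.parse_args_into_dataclasses(args=['--learning_rate', '1e-4', '--do_eval'])
print(training_args.learning_rate, training_args.do_eval)  # 0.0001 True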
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class lowerCAmelCase_ : '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ) -> Optional[int]: raise NotImplementedError() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: raise NotImplementedError() class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Optional[int] ,A_ : "AutoTokenizer" ,A_ : bool = False ,**A_ : Optional[int] ) -> Optional[Any]: A = tokenizer A = skip_prompt A = decode_kwargs # variables used in the streaming process A = [] A = 0 A = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Dict ) -> int: if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('TextStreamer only supports batch size 1' ) elif len(value.shape ) > 1: A = value[0] if self.skip_prompt and self.next_tokens_are_prompt: A = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) A = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('\n' ): A = text[self.print_len :] A = [] A = 0 # If the last token is a CJK character, we print the characters. elif len(A_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ): A = text[self.print_len :] self.print_len += len(A_ ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: A = text[self.print_len : text.rfind(' ' ) + 1] self.print_len += len(A_ ) self.on_finalized_text(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: # Flush the cache, if it exists if len(self.token_cache ) > 0: A = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) A = text[self.print_len :] A = [] A = 0 else: A = '' A = True self.on_finalized_text(A_ ,stream_end=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : bool = False ) -> List[str]: print(A_ ,flush=A_ ,end='' if not stream_end else None ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[int] ) -> Dict: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Dict ,A_ : "AutoTokenizer" ,A_ : bool = False ,A_ : Optional[float] = None ,**A_ : Tuple ) -> Union[str, Any]: super().__init__(A_ ,A_ ,**A_ ) A = Queue() A = None A = timeout def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : bool = False ) -> Any: self.text_queue.put(A_ ,timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal ,timeout=self.timeout ) def __iter__( self : Optional[int] ) -> Tuple: return self def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
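Typical wiring for the iterator variant, following the upstream transformers API (the gpt2 model id is a placeholder); generation runs in a worker thread so the main thread can consume text as it is produced:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = AutoModelForCausalLM.from_pretrained('gpt2')

inputs = tokenizer(['An increasing sequence: one,'], return_tensors='pt')
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for new_text in streamer:   # blocks until the next chunk is finalized
    print(new_text, end='')
thread.join()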
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase = 16 _lowercase = 32 def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ): A = AutoTokenizer.from_pretrained(snake_case__ ) A = load_dataset('glue' , 'mrpc' ) def tokenize_function(snake_case__ : Dict ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. A = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[int] ): # Initialize accelerator A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config['lr'] A = int(config['num_epochs'] ) A = int(config['seed'] ) A = int(config['batch_size'] ) A = args.model_name_or_path set_seed(snake_case__ ) A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer A = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A = optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: A = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: A = 1 A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in 
the same order we gave them to the # prepare method. A , A , A , A , A = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over A = 0 # We also need to keep track of the stating epoch so files are named properly A = 0 # Now we train the model A = evaluate.load('glue' , 'mrpc' ) A = 0 A = {} for epoch in range(snake_case__ , snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): A = model(**snake_case__ ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A = 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A , A = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case__ ) - 1: A = predictions[: len(eval_dataloader.dataset ) - samples_seen] A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , snake_case__ ) A = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: A = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(snake_case__ , snake_case__ ) def _snake_case ( ): A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , ) parser.add_argument( '--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , ) A = parser.parse_args() A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
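To run this, the usual accelerate flow applies: `accelerate config` first (optionally selecting DeepSpeed, whose JSON config is what decides whether the `DummyOptim`/`DummyScheduler` stand-ins above are used instead of real AdamW and a linear schedule), then `accelerate launch <this script> --model_name_or_path bert-base-cased --num_epochs 3`.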
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING _lowercase = logging.get_logger(__name__) @add_end_docstrings(_lowercase ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Tuple ,**A_ : str ) -> str: super().__init__(**A_ ) requires_backends(self ,'vision' ) requires_backends(self ,'torch' ) if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) self.check_model_type(A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,**A_ : Union[str, Any] ) -> int: A = {} A = {} A = {} # preprocess args if "points_per_batch" in kwargs: A = kwargs['points_per_batch'] if "points_per_crop" in kwargs: A = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: A = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: A = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: A = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: A = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: A = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: A = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: A = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: A = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: A = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: A = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Union[str, Any] ,A_ : str ,*A_ : Dict ,A_ : int=None ,A_ : Optional[int]=None ,**A_ : Union[str, Any] ) -> List[Any]: return super().__call__(A_ ,*A_ ,num_workers=A_ ,batch_size=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : List[str]=64 ,A_ : int = 0 ,A_ : float = 512 / 1500 ,A_ : Optional[int] = 32 ,A_ : Optional[int] = 1 ,) -> Tuple: A = load_image(A_ ) A = self.image_processor.size['longest_edge'] A , A , A , A = self.image_processor.generate_crop_boxes( A_ ,A_ ,A_ ,A_ ,A_ ,A_ ) A = self.image_processor(images=A_ ,return_tensors='pt' ) with self.device_placement(): if self.framework == "pt": A = self.get_inference_context() with inference_context(): A = self._ensure_tensor_on_device(A_ ,device=self.device ) A = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) ) A = image_embeddings A = grid_points.shape[1] A = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
' 'To return all points at once, set points_per_batch to None' ) for i in range(0 ,A_ ,A_ ): A = grid_points[:, i : i + points_per_batch, :, :] A = input_labels[:, i : i + points_per_batch] A = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[str] ,A_ : List[Any]=0.88 ,A_ : Dict=0.95 ,A_ : Optional[int]=0 ,A_ : Union[str, Any]=1 ,) -> str: A = model_inputs.pop('input_boxes' ) A = model_inputs.pop('is_last' ) A = model_inputs.pop('original_sizes' ).tolist() A = model_inputs.pop('reshaped_input_sizes' ).tolist() A = self.model(**A_ ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks A = model_outputs['pred_masks'] A = self.image_processor.post_process_masks( A_ ,A_ ,A_ ,A_ ,binarize=A_ ) A = model_outputs['iou_scores'] A , A , A = self.image_processor.filter_masks( masks[0] ,iou_scores[0] ,original_sizes[0] ,input_boxes[0] ,A_ ,A_ ,A_ ,A_ ,) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ,A_ : Optional[Any]=False ,A_ : int=False ,A_ : int=0.7 ,) -> List[Any]: A = [] A = [] A = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores' ) ) all_masks.extend(model_output.pop('masks' ) ) all_boxes.append(model_output.pop('boxes' ) ) A = torch.cat(A_ ) A = torch.cat(A_ ) A , A , A , A = self.image_processor.post_process_for_mask_generation( A_ ,A_ ,A_ ,A_ ) A = defaultdict(A_ ) for output in model_outputs: for k, v in output.items(): extra[k].append(A_ ) A = {} if output_rle_mask: A = rle_mask if output_bboxes_mask: A = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
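This chunked pipeline is what `pipeline('mask-generation')` dispatches to. A minimal hedged sketch using the public SAM checkpoint (the checkpoint name, sample image URL, and point count are assumptions, not taken from this file):

from transformers import pipeline

generator = pipeline('mask-generation', model='facebook/sam-vit-base')
outputs = generator(
    'http://images.cocodataset.org/val2017/000000039769.jpg',
    points_per_batch=64,   # forwarded to preprocess(), as parsed in _sanitize_parameters
)
print(len(outputs['masks']), outputs['scores'][:3])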
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
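# A minimal standalone sketch (not part of the test suite above) of the forward pass
# the tester asserts, using a small randomly initialized XLM model. Config values
# mirror the model tester's defaults and are illustrative only.
import torch
from transformers import XLMConfig, XLMModel

config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4, max_position_embeddings=512)
model = XLMModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([13, 7, 32])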
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers _lowercase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Tuple=None ): require_version(deps[pkg] , snake_case__ )
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowercase = logging.get_logger(__name__) @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : int ,**A_ : Any ) -> Any: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A = deprecated_arg[3:] A = not kwargs.pop(A_ ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) A = kwargs.pop('tpu_name' ,self.tpu_name ) A = kwargs.pop('device_idx' ,self.device_idx ) A = kwargs.pop('eager_mode' ,self.eager_mode ) A = kwargs.pop('use_xla' ,self.use_xla ) super().__init__(**A_ ) _lowerCamelCase: str = field( default=_lowercase , metadata={'''help''': '''Name of TPU'''} , ) _lowerCamelCase: int = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Benchmark models in eager model.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self ,['tf'] ) A = None if self.tpu: try: if self.tpu_name: A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: A = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: A = None return tpu @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self ,['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) A = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' ) A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] ,'GPU' ) # disable GPU A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' ) return strategy @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: requires_backends(self ,['tf'] ) return self._setup_tpu is not None @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> "tf.distribute.Strategy": requires_backends(self ,['tf'] ) return self._setup_strategy @property def _SCREAMING_SNAKE_CASE ( self : int ) -> str: requires_backends(self ,['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: requires_backends(self ,['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _SCREAMING_SNAKE_CASE ( self : str ) -> bool: return self.n_gpu > 0
"""simple docstring""" import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): A = s.rsplit(snake_case__ , snake_case__ ) return new.join(snake_case__ ) def _snake_case ( snake_case__ : List[Any] ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def _snake_case ( snake_case__ : Union[str, Any] ): A = {} A = ['group_1', 'group_2', 'group_3', 'group_4'] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: A = key.replace(F'{group_key}.' , F'{group_key}.group.' ) if "res_path" in key: A = key.replace('res_path.' , 'res_path.path.' ) if key.endswith('.w' ): A = rreplace(snake_case__ , '.w' , '.weight' , 1 ) if key.endswith('.b' ): A = rreplace(snake_case__ , '.b' , '.bias' , 1 ) A = value.float() return upgrade @torch.no_grad() def _snake_case ( snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Tuple=None , snake_case__ : str=True ): from dall_e import Encoder A = Encoder() if os.path.exists(snake_case__ ): A = torch.load(snake_case__ ) else: A = torch.hub.load_state_dict_from_url(snake_case__ ) if isinstance(snake_case__ , snake_case__ ): A = ckpt.state_dict() encoder.load_state_dict(snake_case__ ) if config_path is not None: A = FlavaImageCodebookConfig.from_pretrained(snake_case__ ) else: A = FlavaImageCodebookConfig() A = FlavaImageCodebook(snake_case__ ).eval() A = encoder.state_dict() A = upgrade_state_dict(snake_case__ ) hf_model.load_state_dict(snake_case__ ) A = hf_model.state_dict() A = count_parameters(snake_case__ ) A = count_parameters(snake_case__ ) assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(snake_case__ ) else: return hf_state_dict if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') _lowercase = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _lowercase = logging.get_logger(__name__) _lowercase = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = '''dpt''' def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]: super().__init__(**A_ ) A = hidden_size A = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) A = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): logger.info('Initializing the config with a `BiT` backbone.' ) A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): A = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) A = backbone_featmap_shape A = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' ) else: A = None A = None A = [] A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) A = readout_type A = reassemble_factors A = neck_hidden_sizes A = fusion_hidden_size A = head_in_index A = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A = use_auxiliary_head A = auxiliary_loss_weight A = semantic_loss_ignore_index A = semantic_classifier_dropout def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: A = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A = self.backbone_config.to_dict() A = self.__class__.model_type return output
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 _lowercase = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 2048-bit 14: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 3072-bit 15: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 4096-bit 16: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' 
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 6144-bit 17: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 8192-bit 18: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + 
'''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, } class lowerCAmelCase_ : '''simple docstring''' def __init__( self : str ,A_ : int = 14 ) -> None: if group not in primes: raise ValueError('Unsupported Group' ) A = primes[group]['prime'] A = primes[group]['generator'] A = int(hexlify(urandom(32 ) ) ,base=16 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> str: return hex(self.__private_key )[2:] def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: A = pow(self.generator ,self.__private_key ,self.prime ) return hex(A_ )[2:] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : int ) -> bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(A_ ,(self.prime - 1) // 2 ,self.prime ) == 1 ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : str ) -> str: A = int(A_ ,base=16 ) if not self.is_valid_public_key(A_ ): raise ValueError('Invalid public key' ) A = pow(A_ ,self.__private_key ,self.prime ) return shaaaa(str(A_ ).encode() ).hexdigest() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : int ,A_ : int ) -> bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(A_ ,(prime - 1) // 2 ,A_ ) == 1 ) @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : str ,A_ : str ,A_ : int = 14 ) -> str: A = int(A_ ,base=16 ) A = int(A_ ,base=16 ) A = primes[group]['prime'] if not DiffieHellman.is_valid_public_key_static(A_ ,A_ ): raise ValueError('Invalid public key' ) A = pow(A_ ,A_ ,A_ ) return shaaaa(str(A_ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
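# Toy end-to-end sketch of the exchange implemented above, using small textbook
# parameters (prime=23, generator=5) instead of the MODP groups; illustration only.
# The mangled `shaaaa` import above corresponds to hashlib.sha256.
from hashlib import sha256
from os import urandom

def dh_demo(prime: int, generator: int) -> None:
    a = int.from_bytes(urandom(32), "big")  # Alice's private key
    b = int.from_bytes(urandom(32), "big")  # Bob's private key
    pub_a, pub_b = pow(generator, a, prime), pow(generator, b, prime)
    shared_a = sha256(str(pow(pub_b, a, prime)).encode()).hexdigest()
    shared_b = sha256(str(pow(pub_a, b, prime)).encode()).hexdigest()
    assert shared_a == shared_b  # both sides derive the same key

dh_demo(prime=23, generator=5)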
"""simple docstring""" from __future__ import annotations import math _lowercase = '''2020.9.26''' _lowercase = '''xcodz-dot, cclaus, dhruvmanila''' def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ): A = F'Input values must either be float or int: {list(locals().values() )}' raise TypeError(snake_case__ ) A = ((x * distance) / (z + distance)) * scale A = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ): if not isinstance(snake_case__ , snake_case__ ): raise TypeError('Axis must be a str' ) A = locals() del input_variables["axis"] if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ): A = ( 'Input values except axis must either be float or int: ' F'{list(input_variables.values() )}' ) raise TypeError(snake_case__ ) A = (angle % 360) / 450 * 180 / math.pi if axis == "z": A = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ ) A = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = z elif axis == "x": A = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ ) A = x elif axis == "y": A = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = y else: raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""") print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : Dict ): # initialize config if "resnet-50" in model_name: A = ResNetConfig.from_pretrained('microsoft/resnet-50' ) elif "resnet-101" in model_name: A = ResNetConfig.from_pretrained('microsoft/resnet-101' ) else: raise ValueError('Model name should include either resnet50 or resnet101' ) A = DetrConfig(use_timm_backbone=snake_case__ , backbone_config=snake_case__ ) # set label attributes A = 'panoptic' in model_name if is_panoptic: A = 250 else: A = 91 A = 'huggingface/label-files' A = 'coco-detection-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} return config, is_panoptic def _snake_case ( snake_case__ : int ): # here we list all keys to be renamed (original name on the left, our name on the right) A = [] # stem # fmt: off rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') ) rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') ) rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') ) rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') ) rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight', 
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight', ) ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') ) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append( (F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight', ) ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', 
F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) return rename_keys def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): A = state_dict.pop(snake_case__ ) A = val def _snake_case ( snake_case__ : int , snake_case__ : Optional[int]=False ): A = '' if is_panoptic: A = 'detr.' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A = in_proj_weight[:256, :] A = in_proj_bias[:256] A = in_proj_weight[256:512, :] A = in_proj_bias[256:512] A = in_proj_weight[-256:, :] A = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A = in_proj_weight[:256, :] A = in_proj_bias[:256] A = in_proj_weight[256:512, :] A = in_proj_bias[256:512] A = in_proj_weight[-256:, :] A = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention A = state_dict.pop( F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict A = in_proj_weight_cross_attn[:256, :] A = in_proj_bias_cross_attn[:256] A = in_proj_weight_cross_attn[256:512, :] A = in_proj_bias_cross_attn[256:512] A = in_proj_weight_cross_attn[-256:, :] A = in_proj_bias_cross_attn[-256:] def _snake_case ( ): A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def _snake_case ( snake_case__ : Any , snake_case__ : List[Any]=None , snake_case__ : int=False ): A , A = get_detr_config(snake_case__ ) # load original model from torch hub A 
= { 'detr-resnet-50': 'detr_resnet50', 'detr-resnet-101': 'detr_resnet101', } logger.info(F'Converting model {model_name}...' ) A = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=snake_case__ ).eval() A = detr.state_dict() # rename keys for src, dest in create_rename_keys(snake_case__ ): if is_panoptic: A = 'detr.' + src rename_key(snake_case__ , snake_case__ , snake_case__ ) # query, key and value matrices need special treatment read_in_q_k_v(snake_case__ , is_panoptic=snake_case__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A = 'detr.model.' if is_panoptic else 'model.' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): A = state_dict.pop(snake_case__ ) A = val elif "class_labels_classifier" in key or "bbox_predictor" in key: A = state_dict.pop(snake_case__ ) A = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: A = state_dict.pop(snake_case__ ) A = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): A = state_dict.pop(snake_case__ ) A = val # finally, create HuggingFace model and load state dict A = DetrForSegmentation(snake_case__ ) if is_panoptic else DetrForObjectDetection(snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() # verify our conversion on an image A = 'coco_panoptic' if is_panoptic else 'coco_detection' A = DetrImageProcessor(format=snake_case__ ) A = processor(images=prepare_img() , return_tensors='pt' ) A = encoding['pixel_values'] A = detr(snake_case__ ) A = model(snake_case__ ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) if push_to_hub: # Upload model and image processor to the hub logger.info('Uploading PyTorch model and image processor to the hub...' ) model.push_to_hub(F'nielsr/{model_name}' ) processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''detr-resnet-50''', type=str, choices=['''detr-resnet-50''', '''detr-resnet-101'''], help='''Name of the DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''') _lowercase = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
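# Illustration (hypothetical tensors) of the split performed by read_in_q_k_v above:
# torch.nn.MultiheadAttention stores the query/key/value projections stacked in a
# single in_proj matrix, which the conversion slices into three equal blocks.
import torch

hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)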
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ,A_ : int ) -> Union[str, Any]: A = n A = [None] * self.n A = 0 # index of the first element A = 0 A = 0 def __len__( self : int ) -> int: return self.size def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.size == 0 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: return False if self.is_empty() else self.array[self.front] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int: if self.size >= self.n: raise Exception('QUEUE IS FULL' ) A = data A = (self.rear + 1) % self.n self.size += 1 return self def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: if self.size == 0: raise Exception('UNDERFLOW' ) A = self.array[self.front] A = None A = (self.front + 1) % self.n self.size -= 1 return temp
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin _lowercase = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class lowerCAmelCase_ ( unittest.TestCase , _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: A = load_tool('text-question-answering' ) self.tool.setup() A = load_tool('text-question-answering' ,remote=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: A = self.tool(A_ ,'What did Hugging Face do in April 2021?' ) self.assertEqual(A_ ,'launched the BigScience Research Workshop' ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = self.remote_tool(A_ ,'What did Hugging Face do in April 2021?' ) self.assertEqual(A_ ,'launched the BigScience Research Workshop' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.tool(text=A_ ,question='What did Hugging Face do in April 2021?' ) self.assertEqual(A_ ,'launched the BigScience Research Workshop' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = self.remote_tool(text=A_ ,question='What did Hugging Face do in April 2021?' ) self.assertEqual(A_ ,'launched the BigScience Research Workshop' )
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
"""simple docstring""" from string import ascii_uppercase _lowercase = {char: i for i, char in enumerate(ascii_uppercase)} _lowercase = dict(enumerate(ascii_uppercase)) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = len(snake_case__ ) A = 0 while True: if x == i: A = 0 if len(snake_case__ ) == len(snake_case__ ): break key += key[i] i += 1 return key def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in message: if letter == " ": cipher_text += " " else: A = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: A = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _snake_case ( ): A = 'THE GERMAN ATTACK' A = 'SECRET' A = generate_key(snake_case__ , snake_case__ ) A = cipher_text(snake_case__ , snake_case__ ) print(F'Encrypted Text = {s}' ) print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''gpt_bigcode''' _lowerCamelCase: List[Any] = ['''past_key_values'''] _lowerCamelCase: int = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Optional[int] ,A_ : Dict=5_0257 ,A_ : Union[str, Any]=1024 ,A_ : str=768 ,A_ : Any=12 ,A_ : Any=12 ,A_ : Optional[int]=None ,A_ : Any="gelu_pytorch_tanh" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : List[str]=0.1 ,A_ : Tuple=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[str]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=5_0256 ,A_ : Union[str, Any]=5_0256 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : Dict=True ,**A_ : Union[str, Any] ,) -> Union[str, Any]: A = vocab_size A = n_positions A = n_embd A = n_layer A = n_head A = n_inner A = activation_function A = resid_pdrop A = embd_pdrop A = attn_pdrop A = layer_norm_epsilon A = initializer_range A = scale_attn_weights A = use_cache A = attention_softmax_in_fpaa A = scale_attention_softmax_in_fpaa A = multi_query A = bos_token_id A = eos_token_id super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: A , A , A = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[int] ) -> Dict: return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return self.major, self.minor, self.patch def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]: if isinstance(A_ ,A_ ): return Version(A_ ) elif isinstance(A_ ,A_ ): return other raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' ) def __eq__( self : List[Any] ,A_ : Dict ) -> Any: try: A = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple: A = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : Union[str, Any] ) -> Union[str, Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]: A = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.version_str def _snake_case ( snake_case__ : List[str] ): A = _VERSION_REG.match(snake_case__ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def _snake_case ( snake_case__ : str ): return ".".join(str(snake_case__ ) for v in version_tuple )
"""simple docstring""" import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _lowercase = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) _lowercase = '''sshleifer/student_marian_en_ro_6_1''' _lowercase = '''sshleifer/tiny-mbart''' @require_torch class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple: A = self.run_trainer( eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,) A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history if not do_eval: return A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] ,A_ ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: self.run_seqaseq_quick() @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : int ) -> int: self.run_seqaseq_quick(distributed=A_ ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: self.run_seqaseq_quick( distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ ) @require_apex @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) @parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } A = experiments[experiment_id] A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} A = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] ) A = len(re.findall(A_ ,cl.err ) ) self.assertEqual(A_ ,data['n_matches'] ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = self.run_trainer( eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,) # Check metrics A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] A = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] ,A_ ) # test if do_predict saves generations and metrics A = os.listdir(A_ ) A = {os.path.basename(A_ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: from transformers.training_args import OptimizerNames def train_and_return_metrics(A_ : str ) -> Tuple[int, float]: A = '--skip_memory_metrics 0' A = self.run_trainer( max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,) # Check metrics A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) A = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A = 
train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A = gpu_peak_mem_orig + gpu_alloc_mem_orig A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and' F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,) self.assertGreater( A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and' F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,) self.assertEqual( A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict: A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' A = self.get_auto_remove_tmp_dir() A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split() A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split() A = '\n --do_predict\n '.split() A = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'--optim {optim}'.split() if extra_args_str is not None: args += 
extra_args_str.split() if distributed: if n_gpus_to_use is None: A = get_gpu_count() A = get_torch_dist_unique_port() A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split() A = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A_ ,env=self.get_env() ) else: A = ['run_translation.py'] + args with patch.object(A_ ,'argv' ,A_ ): main() return output_dir
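# A standalone sketch of the optimizer-memory arithmetic in the BNB comments above,
# assuming Adam keeps two fp32 moment tensors per parameter (8 bytes) while bnb's
# 8-bit Adam keeps two int8 tensors (2 bytes); 25M is the test's own estimate of
# quantizable parameters. Not part of the test suite, purely illustrative.
def optimizer_state_mib(n_params: int, bytes_per_param: int) -> float:
    """Optimizer-state footprint in MiB, matching the test's `/ 2**20` scaling."""
    return n_params * bytes_per_param / 2**20

quantizable_params = 25_000_000
savings = optimizer_state_mib(quantizable_params, 8) - optimizer_state_mib(quantizable_params, 2)
print(f"expected optimizer-memory savings: ~{savings:.0f}MiB")  # ~143MiB, the "~150MB" the test rounds to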
74
1
"""simple docstring""" import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch _lowercase = '''sshleifer/bart-tiny-random''' _lowercase = '''patrickvonplaten/t5-tiny-random''' @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return AutoConfig.from_pretrained(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A , *A = create_student_by_copying_alternating_layers(A_ ,tempfile.mkdtemp() ,e=1 ,d=1 ) self.assertEqual(student.config.num_hidden_layers ,1 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: A , *A = create_student_by_copying_alternating_layers(A_ ,tempfile.mkdtemp() ,e=1 ,d=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: A , *A = create_student_by_copying_alternating_layers(A_ ,tempfile.mkdtemp() ,e=1 ,d=A_ ) self.assertEqual(student.config.encoder_layers ,1 ) self.assertEqual(student.config.decoder_layers ,self.teacher_config.encoder_layers ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A , *A = create_student_by_copying_alternating_layers(A_ ,tempfile.mkdtemp() ,e=1 ,d=1 ) self.assertEqual(student.config.encoder_layers ,1 ) self.assertEqual(student.config.decoder_layers ,1 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: with self.assertRaises(A_ ): create_student_by_copying_alternating_layers(A_ ,tempfile.mkdtemp() ,e=A_ ,d=A_ )
74
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''deit''' def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = encoder_stride class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: int = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float: return 1e-4
74
1
"""simple docstring""" # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : int ,A_ : Tuple ,A_ : List[str] ) -> Tuple: super().__init__() self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Optional[Any] ,A_ : int = 1 ,A_ : Optional[torch.Generator] = None ,A_ : int = 50 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Tuple ,) -> Union[ImagePipelineOutput, Tuple]: A = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,generator=A_ ,) A = image.to(self.device ) # set step values self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A = self.unet(A_ ,A_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = (image / 2 + 0.5).clamp(0 ,1 ) A = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=A_ ), "This is a local test"
74
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ : Dict ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) A = [] for i in range(snake_case__ ): A = i / num_diffusion_timesteps A = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) ) return torch.tensor(snake_case__ , dtype=torch.floataa ) class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers] _lowerCamelCase: Optional[Any] = 2 @register_to_config def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]: if trained_betas is not None: A = torch.tensor(A_ ,dtype=torch.floataa ) elif beta_schedule == "linear": A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. A = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' ) elif beta_schedule == "exp": A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) A = 1.0 - self.betas A = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(A_ ,A_ ,A_ ) A = use_karras_sigmas def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple: if schedule_timesteps is None: A = self.timesteps A = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: A = 1 if len(A_ ) > 1 else 0 else: A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep A = self._index_counter[timestep_int] return indices[pos].item() @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor: A = self.index_for_timestep(A_ ) A = self.sigmas[step_index] A = sample / ((sigma**2 + 1) ** 0.5) return sample def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]: A = num_inference_steps A = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy() elif self.config.timestep_spacing == "leading": A = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": A = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ ) timesteps -= 1 else: raise ValueError( F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' 
) A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) A = np.log(A_ ) A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ ) if self.config.use_karras_sigmas: A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps ) A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] ) A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) A = torch.from_numpy(A_ ).to(device=A_ ) A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) A = torch.from_numpy(A_ ) A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(A_ ).startswith('mps' ): # mps does not support float64 A = timesteps.to(A_ ,dtype=torch.floataa ) else: A = timesteps.to(device=A_ ) # empty dt and derivative A = None A = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter A = defaultdict(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict: # get log sigma A = np.log(A_ ) # get distribution A = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) A = low_idx + 1 A = log_sigmas[low_idx] A = log_sigmas[high_idx] # interpolate sigmas A = (low - log_sigma) / (low - high) A = np.clip(A_ ,0 ,1 ) # transform interpolation to time range A = (1 - w) * low_idx + w * high_idx A = t.reshape(sigma.shape ) return t def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor: A = in_sigmas[-1].item() A = in_sigmas[0].item() A = 7.0 # 7.0 is the value used in the paper A = np.linspace(0 ,1 ,A_ ) A = sigma_min ** (1 / rho) A = sigma_max ** (1 / rho) A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.dt is None def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]: A = self.index_for_timestep(A_ ) # advance index counter by 1 A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: A = self.sigmas[step_index] A = self.sigmas[step_index + 1] else: # 2nd order / Heun's method A = self.sigmas[step_index - 1] A = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API A = 0 A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": A = sigma_hat if self.state_in_first_order else sigma_next A = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": A = sigma_hat if self.state_in_first_order else sigma_next A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": A = model_output else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.config.clip_sample: A = pred_original_sample.clamp( -self.config.clip_sample_range ,self.config.clip_sample_range ) if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order A = (sample - pred_original_sample) / sigma_hat # 3. delta timestep A = sigma_next - sigma_hat # store for 2nd order step A = derivative A = dt A = sample else: # 2. 2nd order / Heun's method A = (sample - pred_original_sample) / sigma_next A = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample A = self.dt A = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" A = None A = None A = None A = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A_ ): # mps does not support float64 A = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) A = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: A = self.timesteps.to(original_samples.device ) A = timesteps.to(original_samples.device ) A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps] A = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): A = sigma.unsqueeze(-1 ) A = original_samples + noise * sigma return noisy_samples def __len__( self : Dict ) -> int: return self.config.num_train_timesteps
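# A standalone sketch of the Karras et al. (2022) noise schedule computed by the
# sigma-conversion helper above (called _convert_to_karras in the original
# diffusers source): interpolate linearly in sigma**(1/rho) space (rho=7.0, the
# paper's value), then raise the result back to the rho-th power. Pure numpy;
# no scheduler state required.
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, n)            # 0 -> sigma_max end, 1 -> sigma_min end
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.1, 10.0, 5))  # monotonically decreasing noise levels from 10.0 to 0.1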
74
1
"""simple docstring""" from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer _lowercase = logging.get_logger(__name__) _lowercase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } _lowercase = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } _lowercase = { '''facebook/blenderbot_small-90M''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: List[str] = BlenderbotSmallTokenizer def __init__( self : Dict ,A_ : Optional[Any]=None ,A_ : List[Any]=None ,A_ : List[Any]="<|endoftext|>" ,A_ : int="<|endoftext|>" ,A_ : Optional[Any]="<|endoftext|>" ,A_ : Optional[int]=False ,A_ : Dict=True ,**A_ : Optional[int] ,) -> int: super().__init__( ByteLevelBPETokenizer( vocab=A_ ,merges=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ,) ,bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,**A_ ,) A = add_prefix_space def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Union[str, Any]=None ) -> Tuple: A = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
74
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : list[int] ) -> None: A = len(A_ ) A = [0] * len_array if len_array > 0: A = array[0] for i in range(1 ,A_ ): A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool: A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(A_ ) return False if __name__ == "__main__": import doctest doctest.testmod()
74
1
"""simple docstring""" import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _lowercase = logging.getLogger(__name__) _lowercase = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) _lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Leave None if you want to train a model from''' ''' scratch.''' ) } , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_lowercase )} , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''The input training data file (a text file).'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={ '''help''': ( '''The input training data files (multiple files in glob format). 
''' '''Very often splitting large files to smaller files can prevent the tokenizer from going out of memory''' ) } , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , ) _lowerCamelCase: bool = field( default=_lowercase , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , ) _lowerCamelCase: bool = field( default=_lowercase , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} ) _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Whether or not to use whole word mask.'''} ) _lowerCamelCase: float = field( default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) _lowerCamelCase: float = field( default=1 / 6 , metadata={ '''help''': ( '''Ratio of length of a span of masked tokens to surrounding context length for permutation language''' ''' modeling.''' ) } , ) _lowerCamelCase: int = field( default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} ) _lowerCamelCase: int = field( default=-1 , metadata={ '''help''': ( '''Optional input sequence length after tokenization. ''' '''The training dataset will be truncated in blocks of this size for training. ''' '''Defaults to the model max input length for single sentence inputs (take into account special tokens).''' ) } , ) _lowerCamelCase: bool = field( default=_lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def _snake_case ( snake_case__ : DataTrainingArguments , snake_case__ : PreTrainedTokenizer , snake_case__ : bool = False , snake_case__ : Optional[str] = None , ): def _dataset(snake_case__ : Tuple , snake_case__ : Union[str, Any]=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' ) return LineByLineWithRefDataset( tokenizer=snake_case__ , file_path=snake_case__ , block_size=args.block_size , ref_path=snake_case__ , ) return LineByLineTextDataset(tokenizer=snake_case__ , file_path=snake_case__ , block_size=args.block_size ) else: return TextDataset( tokenizer=snake_case__ , file_path=snake_case__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=snake_case__ , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(snake_case__ ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A , A , A = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( 'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ' 'or remove the --do_eval argument.' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bit training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , snake_case__ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: A = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: A = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.tokenizer_name: A = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another' ' script, save it, and load it from here, using --tokenizer_name' ) if model_args.model_name_or_path: A = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , ) else: logger.info('Training new model from scratch' ) A = AutoModelWithLMHead.from_config(snake_case__ ) model.resize_token_embeddings(len(snake_case__ ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( 'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the' ' --mlm flag (masked language modeling).'
) if data_args.block_size <= 0: A = tokenizer.max_len # Our input block size will be the max possible for the model else: A = min(data_args.block_size , tokenizer.max_len ) # Get datasets A = ( get_dataset(snake_case__ , tokenizer=snake_case__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) A = ( get_dataset(snake_case__ , tokenizer=snake_case__ , evaluate=snake_case__ , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": A = DataCollatorForPermutationLanguageModeling( tokenizer=snake_case__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: A = DataCollatorForWholeWordMask( tokenizer=snake_case__ , mlm_probability=data_args.mlm_probability ) else: A = DataCollatorForLanguageModeling( tokenizer=snake_case__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A = Trainer( model=snake_case__ , args=snake_case__ , data_collator=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , prediction_loss_only=snake_case__ , ) # Training if training_args.do_train: A = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=snake_case__ ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A = trainer.evaluate() A = math.exp(eval_output['eval_loss'] ) A = {'perplexity': perplexity} A = os.path.join(training_args.output_dir , 'eval_results_lm.txt' ) if trainer.is_world_master(): with open(snake_case__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , snake_case__ , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) results.update(snake_case__ ) return results def _snake_case ( snake_case__ : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
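# A hedged invocation sketch for the script above; the flag names are read off
# the dataclass fields and argparse wiring in this file, while the model name
# and file paths are placeholders:
#
#   python run_language_modeling.py \
#       --model_name_or_path bert-base-uncased \
#       --train_data_file train.txt \
#       --eval_data_file eval.txt \
#       --mlm --whole_word_mask \
#       --do_train --do_eval \
#       --output_dir ./lm_out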
74
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = RobertaPreLayerNormConfig.from_pretrained( snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) ) A = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue A = tensor_value A = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) model.save_pretrained(snake_case__ ) # convert tokenizer A = AutoTokenizer.from_pretrained(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
74
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: int = ['''torch'''] def __init__( self : Union[str, Any] ,*A_ : Tuple ,**A_ : Optional[int] ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[int] ,**A_ : Tuple ) -> Optional[int]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[Any] ,**A_ : Dict ) -> int: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : List[str] ) -> List[str]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Union[str, Any] ,**A_ : Dict ) -> List[Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : Any ,*A_ : List[Any] ,**A_ : Dict ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : Tuple ) -> List[str]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : str ,**A_ : List[str] ) -> List[Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''torch'''] def __init__( self : Any ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> Optional[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> List[Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : List[str] ,**A_ : List[Any] ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = ['''torch'''] def __init__( self : Optional[int] ,*A_ : str ,**A_ : List[str] ) -> List[str]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> List[Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : List[Any] ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> List[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[str] ,**A_ : Optional[int] ) -> Optional[Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : int ,**A_ : int ) -> str: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : List[str] ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> int: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[Any] ,**A_ : str ) -> int: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : 
Union[str, Any] ,**A_ : Union[str, Any] ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = ['''torch'''] def __init__( self : List[Any] ,*A_ : int ,**A_ : int ) -> Optional[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : int ,**A_ : Tuple ) -> List[str]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Optional[Any] ,**A_ : Tuple ) -> Optional[Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = ['''torch'''] def __init__( self : List[str] ,*A_ : int ,**A_ : str ) -> Union[str, Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Any: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Tuple ,**A_ : Any ) -> Any: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ['''torch'''] def __init__( self : Any ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Optional[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : str ,**A_ : List[str] ) -> Dict: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : Any ) -> int: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : Tuple ,*A_ : Tuple ,**A_ : Tuple ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Tuple ) -> Tuple: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Union[str, Any] ,**A_ : Dict ) -> str: requires_backends(cls ,['torch'] ) def _snake_case ( *snake_case__ : Any , **snake_case__ : Dict ): requires_backends(snake_case__ , ['torch'] ) def _snake_case ( *snake_case__ : Optional[int] , **snake_case__ : List[Any] ): requires_backends(snake_case__ , ['torch'] ) def _snake_case ( *snake_case__ : str , **snake_case__ : str ): requires_backends(snake_case__ , ['torch'] ) def _snake_case ( *snake_case__ : int , **snake_case__ : List[Any] ): requires_backends(snake_case__ , ['torch'] ) def _snake_case ( *snake_case__ : List[str] , **snake_case__ : Any ): requires_backends(snake_case__ , ['torch'] ) def _snake_case ( *snake_case__ : Any , **snake_case__ : Any ): requires_backends(snake_case__ , ['torch'] ) def _snake_case ( *snake_case__ : int , **snake_case__ : Tuple ): requires_backends(snake_case__ , ['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''torch'''] def __init__( self : Optional[Any] ,*A_ : Optional[int] ,**A_ : Dict ) -> Dict: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Any ,**A_ : List[Any] ) -> Any: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Optional[int] ,**A_ : Union[str, Any] ) -> List[str]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ['''torch'''] def __init__( self : Tuple ,*A_ : List[str] ,**A_ : 
List[Any] ) -> Optional[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Optional[int] ,**A_ : Union[str, Any] ) -> List[Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Dict = ['''torch'''] def __init__( self : str ,*A_ : Union[str, Any] ,**A_ : Any ) -> Tuple: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Any: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Optional[int] ,**A_ : List[Any] ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: int = ['''torch'''] def __init__( self : Optional[Any] ,*A_ : Any ,**A_ : Any ) -> Tuple: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[str] ,**A_ : Dict ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[str] ,**A_ : Optional[int] ) -> Dict: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : Optional[int] ,*A_ : Any ,**A_ : List[str] ) -> Union[str, Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Any ,**A_ : List[str] ) -> Dict: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : int ,**A_ : Dict ) -> List[str]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : List[str] ,*A_ : Dict ,**A_ : Optional[int] ) -> Dict: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Dict ,**A_ : Any ) -> int: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Tuple ) -> int: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: int = ['''torch'''] def __init__( self : List[str] ,*A_ : List[str] ,**A_ : int ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Tuple ,**A_ : Optional[int] ) -> Any: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[Any] ,**A_ : Any ) -> Any: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''torch'''] def __init__( self : Optional[Any] ,*A_ : str ,**A_ : Optional[int] ) -> Dict: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Optional[Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : List[str] ,**A_ : Tuple ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: str = ['''torch'''] def __init__( self : str ,*A_ : Union[str, Any] ,**A_ : Dict ) -> Any: requires_backends(self ,['torch'] ) 
@classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : Any ) -> Optional[Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[str] ,**A_ : Optional[Any] ) -> int: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''torch'''] def __init__( self : int ,*A_ : int ,**A_ : Union[str, Any] ) -> str: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : int ,**A_ : List[Any] ) -> Tuple: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Union[str, Any] ,**A_ : Any ) -> Dict: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = ['''torch'''] def __init__( self : str ,*A_ : Optional[Any] ,**A_ : List[str] ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Optional[Any] ,**A_ : str ) -> Any: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : Tuple ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: str = ['''torch'''] def __init__( self : List[Any] ,*A_ : List[str] ,**A_ : List[str] ) -> int: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : int ) -> List[str]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Tuple ,**A_ : Optional[Any] ) -> Optional[Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: int = ['''torch'''] def __init__( self : Any ,*A_ : List[str] ,**A_ : Any ) -> Tuple: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Tuple ,**A_ : List[Any] ) -> Dict: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[str] ,**A_ : Optional[Any] ) -> List[Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Dict = ['''torch'''] def __init__( self : Tuple ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Union[str, Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Any ,**A_ : Optional[int] ) -> List[Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Dict ,**A_ : Optional[int] ) -> str: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''torch'''] def __init__( self : Any ,*A_ : Optional[int] ,**A_ : List[Any] ) -> str: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[Any] ,**A_ : List[str] ) -> Tuple: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Union[str, Any] ,**A_ : Any ) -> Optional[int]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''torch'''] def __init__( self : List[Any] ,*A_ : Dict ,**A_ : Tuple ) -> Optional[int]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : int ,**A_ : Tuple ) -> Tuple: 
requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : str ,**A_ : Dict ) -> str: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = ['''torch'''] def __init__( self : Tuple ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> List[str]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> List[str]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = ['''torch'''] def __init__( self : Union[str, Any] ,*A_ : Dict ,**A_ : str ) -> Union[str, Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> Dict: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : str ,**A_ : Optional[Any] ) -> int: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ['''torch'''] def __init__( self : List[Any] ,*A_ : Optional[Any] ,**A_ : str ) -> str: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : List[str] ,**A_ : str ) -> int: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[Any] ,**A_ : Tuple ) -> Dict: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''torch'''] def __init__( self : List[str] ,*A_ : int ,**A_ : Tuple ) -> List[str]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Dict ,**A_ : List[Any] ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Dict: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : Union[str, Any] ,*A_ : Any ,**A_ : Dict ) -> List[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : int ) -> str: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : str ,**A_ : List[str] ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: int = ['''torch'''] def __init__( self : int ,*A_ : Any ,**A_ : Any ) -> int: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : List[Any] ,**A_ : Tuple ) -> List[str]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Tuple ,**A_ : str ) -> List[str]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Any = ['''torch'''] def __init__( self : Tuple ,*A_ : str ,**A_ : List[str] ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Any ,**A_ : Any ) -> int: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : 
Optional[Any] ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Any: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ['''torch'''] def __init__( self : List[Any] ,*A_ : str ,**A_ : Union[str, Any] ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Dict ,**A_ : Dict ) -> Dict: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[Any] ,**A_ : str ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ['''torch'''] def __init__( self : Union[str, Any] ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Optional[int]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> Optional[int]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Any ,**A_ : List[Any] ) -> Any: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = ['''torch'''] def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : List[str] ,**A_ : List[str] ) -> int: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[str] ,**A_ : str ) -> List[str]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: str = ['''torch'''] def __init__( self : List[Any] ,*A_ : Dict ,**A_ : Any ) -> Dict: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[str] ,**A_ : List[Any] ) -> List[str]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Dict ,**A_ : Optional[int] ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: str = ['''torch'''] def __init__( self : Optional[int] ,*A_ : List[Any] ,**A_ : int ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Optional[Any] ) -> int: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> List[str]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: int = ['''torch'''] def __init__( self : int ,*A_ : Optional[Any] ,**A_ : int ) -> Dict: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : Any ,**A_ : str ) -> Dict: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Any ,**A_ : str ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Dict = ['''torch'''] def __init__( self : Optional[Any] ,*A_ : Tuple ,**A_ : Dict ) -> List[str]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : List[Any] ) -> Any: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Optional[Any] ,**A_ : List[str] 
) -> List[Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : List[str] ,*A_ : Dict ,**A_ : List[Any] ) -> Optional[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: int = ['''torch'''] def __init__( self : List[str] ,*A_ : Any ,**A_ : int ) -> int: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Tuple ,**A_ : Tuple ) -> Optional[int]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[Any] ,**A_ : str ) -> List[str]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Any = ['''torch'''] def __init__( self : Dict ,*A_ : Tuple ,**A_ : Tuple ) -> List[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : int ,**A_ : Optional[Any] ) -> Tuple: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : str ,**A_ : List[Any] ) -> Tuple: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = ['''torch'''] def __init__( self : Tuple ,*A_ : Any ,**A_ : Tuple ) -> List[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[Any] ,**A_ : Optional[Any] ) -> int: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : Dict ) -> Optional[Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = ['''torch'''] def __init__( self : Any ,*A_ : Dict ,**A_ : Dict ) -> Tuple: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Union[str, Any] ) -> List[str]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Dict ,**A_ : Optional[int] ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = ['''torch'''] def __init__( self : List[Any] ,*A_ : Any ,**A_ : Union[str, Any] ) -> Tuple: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[Any] ,**A_ : str ) -> str: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Optional[int] ,**A_ : Optional[Any] ) -> str: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Any = ['''torch'''] def __init__( self : Optional[int] ,*A_ : List[str] ,**A_ : int ) -> Optional[Any]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Dict ,**A_ : List[Any] ) -> Any: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Union[str, Any] ,**A_ : Any ) -> Tuple: 
requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ['''torch'''] def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Optional[Any] ) -> Optional[int]: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Optional[int] ,**A_ : Optional[Any] ) -> Union[str, Any]: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : int ,**A_ : str ) -> Optional[Any]: requires_backends(cls ,['torch'] ) class lowerCAmelCase_ ( metaclass=_lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''torch'''] def __init__( self : List[str] ,*A_ : Tuple ,**A_ : Tuple ) -> Any: requires_backends(self ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Optional[int] ,**A_ : int ) -> Any: requires_backends(cls ,['torch'] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : str ,**A_ : Union[str, Any] ) -> Optional[int]: requires_backends(cls ,['torch'] )
74
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''roformer''' def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size if embedding_size is None else embedding_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = rotary_value A = use_cache class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : Dict ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _lowercase = object() # For specifying empty leaf dict `{}` _lowercase = object() def _snake_case ( snake_case__ : int , snake_case__ : Optional[int] ): A = tuple((re.compile(x + '$' ) for x in qs) ) for i in range(len(snake_case__ ) - len(snake_case__ ) + 1 ): A = [x.match(snake_case__ ) for x, y in zip(snake_case__ , ks[i:] )] if matches and all(snake_case__ ): return True return False def _snake_case ( snake_case__ : str ): def replace(snake_case__ : Tuple , snake_case__ : Dict ): for rule, replacement in rules: if _match(snake_case__ , snake_case__ ): return replacement return val return replace def _snake_case ( ): return [ # embeddings (("transformer", "wpe", "embedding"), P('mp' , snake_case__ )), (("transformer", "wte", "embedding"), P('mp' , snake_case__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(snake_case__ , 'mp' )), (("attention", "out_proj", "kernel"), P('mp' , snake_case__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(snake_case__ , 'mp' )), (("mlp", "c_fc", "bias"), P('mp' )), (("mlp", "c_proj", "kernel"), P('mp' , snake_case__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _snake_case ( snake_case__ : Optional[Any] ): A = _get_partition_rules() A = _replacement_rules(snake_case__ ) A = {k: _unmatched for k in flatten_dict(snake_case__ )} A = {k: replace(snake_case__ , snake_case__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(snake_case__ ) )
"""simple docstring""" import argparse import struct import unittest class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : bytes ) -> None: A = data # Initialize hash values A = [ 0X6_A_0_9_E_6_6_7, 0XB_B_6_7_A_E_8_5, 0X3_C_6_E_F_3_7_2, 0XA_5_4_F_F_5_3_A, 0X5_1_0_E_5_2_7_F, 0X9_B_0_5_6_8_8_C, 0X1_F_8_3_D_9_A_B, 0X5_B_E_0_C_D_1_9, ] # Initialize round constants A = [ 0X4_2_8_A_2_F_9_8, 0X7_1_3_7_4_4_9_1, 0XB_5_C_0_F_B_C_F, 0XE_9_B_5_D_B_A_5, 0X3_9_5_6_C_2_5_B, 0X5_9_F_1_1_1_F_1, 0X9_2_3_F_8_2_A_4, 0XA_B_1_C_5_E_D_5, 0XD_8_0_7_A_A_9_8, 0X1_2_8_3_5_B_0_1, 0X2_4_3_1_8_5_B_E, 0X5_5_0_C_7_D_C_3, 0X7_2_B_E_5_D_7_4, 0X8_0_D_E_B_1_F_E, 0X9_B_D_C_0_6_A_7, 0XC_1_9_B_F_1_7_4, 0XE_4_9_B_6_9_C_1, 0XE_F_B_E_4_7_8_6, 0X0_F_C_1_9_D_C_6, 0X2_4_0_C_A_1_C_C, 0X2_D_E_9_2_C_6_F, 0X4_A_7_4_8_4_A_A, 0X5_C_B_0_A_9_D_C, 0X7_6_F_9_8_8_D_A, 0X9_8_3_E_5_1_5_2, 0XA_8_3_1_C_6_6_D, 0XB_0_0_3_2_7_C_8, 0XB_F_5_9_7_F_C_7, 0XC_6_E_0_0_B_F_3, 0XD_5_A_7_9_1_4_7, 0X0_6_C_A_6_3_5_1, 0X1_4_2_9_2_9_6_7, 0X2_7_B_7_0_A_8_5, 0X2_E_1_B_2_1_3_8, 0X4_D_2_C_6_D_F_C, 0X5_3_3_8_0_D_1_3, 0X6_5_0_A_7_3_5_4, 0X7_6_6_A_0_A_B_B, 0X8_1_C_2_C_9_2_E, 0X9_2_7_2_2_C_8_5, 0XA_2_B_F_E_8_A_1, 0XA_8_1_A_6_6_4_B, 0XC_2_4_B_8_B_7_0, 0XC_7_6_C_5_1_A_3, 0XD_1_9_2_E_8_1_9, 0XD_6_9_9_0_6_2_4, 0XF_4_0_E_3_5_8_5, 0X1_0_6_A_A_0_7_0, 0X1_9_A_4_C_1_1_6, 0X1_E_3_7_6_C_0_8, 0X2_7_4_8_7_7_4_C, 0X3_4_B_0_B_C_B_5, 0X3_9_1_C_0_C_B_3, 0X4_E_D_8_A_A_4_A, 0X5_B_9_C_C_A_4_F, 0X6_8_2_E_6_F_F_3, 0X7_4_8_F_8_2_E_E, 0X7_8_A_5_6_3_6_F, 0X8_4_C_8_7_8_1_4, 0X8_C_C_7_0_2_0_8, 0X9_0_B_E_F_F_F_A, 0XA_4_5_0_6_C_E_B, 0XB_E_F_9_A_3_F_7, 0XC_6_7_1_7_8_F_2, ] A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes: A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64)) A = struct.pack('>Q' ,(len(A_ ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None: # Convert into blocks of 64 bytes A = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A = list(struct.unpack('>16L' ,A_ ) ) # add 48 0-ed integers words += [0] * 48 A , A , A , A , A , A , A , A = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array A = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) A = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X1_0_0_0_0_0_0_0_0 # Compression A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 ) A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g) A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X1_0_0_0_0_0_0_0_0 A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 ) A = (a & b) ^ (a & c) ^ (b & c) A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0 A , A , A , A , A , A , A , A = ( g, f, e, ((d + tempa) % 0X1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0), ) A = [a, b, c, d, e, f, g, h] # Modify final values A = [ ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int: return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class lowerCAmelCase_ ( unittest.TestCase ): '''simple 
docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None: import hashlib A = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() ) def _snake_case ( ): import doctest doctest.testmod() A = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) A = parser.parse_args() A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: A = f.read() else: A = bytes(snake_case__ , 'utf-8' ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
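# Standalone sanity check mirroring the unit test above: the pure-Python digest must
# match hashlib's reference implementation for the same input.
#
#   import hashlib
#
#   hashlib.sha256(b"Test String").hexdigest()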
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Any=13 ,A_ : List[Any]=30 ,A_ : List[str]=2 ,A_ : List[Any]=3 ,A_ : int=True ,A_ : str=True ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : List[str]=4 ,A_ : Union[str, Any]=37 ,A_ : int="gelu" ,A_ : Optional[int]=0.1 ,A_ : Optional[int]=0.1 ,A_ : Tuple=10 ,A_ : List[str]=0.02 ,A_ : Union[str, Any]=3 ,A_ : Dict=None ,A_ : Optional[Any]=2 ,) -> Optional[Any]: A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope A = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A = (image_size // patch_size) ** 2 A = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : int ,A_ : Any ) -> str: A = DeiTModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ,A_ : Optional[int] ,A_ : str ) -> Union[str, Any]: A = DeiTForMaskedImageModeling(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A = 1 A = DeiTForMaskedImageModeling(A_ ) model.to(A_ ) model.eval() A = 
floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(A_ ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any ,A_ : Tuple ,A_ : str ) -> Any: A = self.type_sequence_label_size A = DeiTForImageClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = DeiTForImageClassification(A_ ) model.to(A_ ) model.eval() A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _lowerCamelCase: Optional[int] = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _lowerCamelCase: Optional[Any] = False _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = DeiTModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ ,nn.Linear ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : int ,A_ : str ,A_ : Tuple=False ) -> Optional[int]: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def 
_SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: if not self.model_tester.is_training: return A , A = self.model_tester.prepare_config_and_inputs_for_common() A = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(A_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A = model_class(A_ ) model.to(A_ ) model.train() A = self._prepare_for_class(A_ ,A_ ,return_labels=A_ ) A = model(**A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A = False A = True for model_class in self.all_model_classes: if model_class in get_values(A_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A = model_class(A_ ) model.gradient_checkpointing_enable() model.to(A_ ) model.train() A = self._prepare_for_class(A_ ,A_ ,return_labels=A_ ) A = model(**A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(A_ ), *get_values(A_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ): A = problem_type['title'] A = problem_type['num_labels'] A = model_class(A_ ) model.to(A_ ) model.train() A = self._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if problem_type["num_labels"] > 1: A = inputs['labels'].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] ) A = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=A_ ) as warning_list: A = model(**A_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = DeiTModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(images=A_ ,return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): A = model(**A_ ) # verify the logits A = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,A_ ) A = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: A = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' ,torch_dtype=torch.floataa ,device_map='auto' ) A = self.default_image_processor A = prepare_img() A = image_processor(images=A_ ,return_tensors='pt' ) A = inputs.pixel_values.to(A_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A = model(A_ )
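# The integration test above reduces to this inference pattern (a sketch; requires
# torch, PIL, and network access; all names are taken verbatim from the test):
#
#   import torch
#   from PIL import Image
#   from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor
#
#   model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 1000)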
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''DeiTFeatureExtractor'''] _lowercase = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _lowercase = 25_60_47 _lowercase = 25_61_45 @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = NllbTokenizer _lowerCamelCase: Dict = NllbTokenizerFast _lowerCamelCase: str = True _lowerCamelCase: int = True _lowerCamelCase: str = {} def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: super().setUp() # We have a SentencePiece fixture for testing A = NllbTokenizer(A_ ,keep_accents=A_ ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = NllbTokenizer(A_ ,keep_accents=A_ ) A = tokenizer.tokenize('This is a test' ) self.assertListEqual(A_ ,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) A = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( A_ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] ,) A = tokenizer.convert_tokens_to_ids(A_ ) self.assertListEqual( A_ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) A = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual( A_ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] ,) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: A = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ ) A = self.tokenizer_class.from_pretrained(A_ ,**A_ ) A = tempfile.mkdtemp() A = tokenizer_r.save_pretrained(A_ ) A = tokenizer_p.save_pretrained(A_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) A = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(A_ ,A_ ) # Checks everything loads correctly in the same way A = tokenizer_r.from_pretrained(A_ ) A = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ ,A_ ) ) shutil.rmtree(A_ ) # Save tokenizer rust, legacy_format=True A = tempfile.mkdtemp() A = tokenizer_r.save_pretrained(A_ ,legacy_format=A_ ) A = 
tokenizer_p.save_pretrained(A_ ) # Checks it save with the same files self.assertSequenceEqual(A_ ,A_ ) # Checks everything loads correctly in the same way A = tokenizer_r.from_pretrained(A_ ) A = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ ,A_ ) ) shutil.rmtree(A_ ) # Save tokenizer rust, legacy_format=False A = tempfile.mkdtemp() A = tokenizer_r.save_pretrained(A_ ,legacy_format=A_ ) A = tokenizer_p.save_pretrained(A_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A = tokenizer_r.from_pretrained(A_ ) A = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ ,A_ ) ) shutil.rmtree(A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: if not self.test_seqaseq: return A = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Longer text that will definitely require truncation. A = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for' ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons' ' will only worsen the violence and misery for millions of people.', ] A = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al' ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] try: A = tokenizer.prepare_seqaseq_batch( src_texts=A_ ,tgt_texts=A_ ,max_length=3 ,max_target_length=10 ,return_tensors='pt' ,src_lang='eng_Latn' ,tgt_lang='ron_Latn' ,) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,10 ) # max_target_length will default to max_length if not specified A = tokenizer.prepare_seqaseq_batch( A_ ,tgt_texts=A_ ,max_length=3 ,return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,3 ) A = tokenizer.prepare_seqaseq_batch( src_texts=A_ ,max_length=3 ,max_target_length=10 ,return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 ) self.assertNotIn('decoder_input_ids' ,A_ ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' 
) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = [AddedToken('<special>' ,lstrip=A_ )] A = self.rust_tokenizer_class.from_pretrained( A_ ,additional_special_tokens=A_ ,**A_ ) A = tokenizer_r.encode('Hey this is a <special> token' ) A = tokenizer_r.encode('<special>' ,add_special_tokens=A_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: A = self.rust_tokenizer_class.from_pretrained( A_ ,additional_special_tokens=A_ ,**A_ ,) A = self.tokenizer_class.from_pretrained( A_ ,additional_special_tokens=A_ ,**A_ ) A = tokenizer_p.encode('Hey this is a <special> token' ) A = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(A_ ,A_ ) self.assertEqual(A_ ,A_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''facebook/nllb-200-distilled-600M''' _lowerCamelCase: Any = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] _lowerCamelCase: Dict = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] _lowerCamelCase: List[str] = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ) -> List[str]: A = NllbTokenizer.from_pretrained( cls.checkpoint_name ,src_lang='eng_Latn' ,tgt_lang='ron_Latn' ) A = 1 return cls def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] ,25_6001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] ,25_6002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] ,25_6057 ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: self.assertIn(A_ ,self.tokenizer.all_special_ids ) # fmt: off A = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047] # fmt: on A = self.tokenizer.decode(A_ ,skip_special_tokens=A_ ) A = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ ) self.assertNotIn(self.tokenizer.eos_token ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: A = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] ,A_ ) A = 10 A = self.tokenizer(A_ ,max_length=A_ ,truncation=A_ ).input_ids[0] self.assertEqual(ids[-1] ,2 ) self.assertEqual(ids[0] ,A_ ) self.assertEqual(len(A_ ) ,A_ ) def 
_SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) ,[25_6203, 3] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = tempfile.mkdtemp() A = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A_ ) A = NllbTokenizer.from_pretrained(A_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=A_ ,truncation=A_ ,max_length=len(self.expected_src_tokens ) ,return_tensors='pt' ,) A = shift_tokens_right( batch['labels'] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(A_ ,A_ ) self.assertEqual((2, 15) ,batch.input_ids.shape ) self.assertEqual((2, 15) ,batch.attention_mask.shape ) A = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,A_ ) self.assertEqual(A_ ,batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.tokenizer(self.src_text ,padding=A_ ,truncation=A_ ,max_length=3 ,return_tensors='pt' ) A = self.tokenizer( text_target=self.tgt_text ,padding=A_ ,truncation=A_ ,max_length=10 ,return_tensors='pt' ) A = targets['input_ids'] A = shift_tokens_right( A_ ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = self.tokenizer._build_translation_inputs( 'A test' ,return_tensors='pt' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(A_ ) ,{ # A, test, EOS, en_XX 'input_ids': [[25_6047, 70, 7356, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_6057, } ,) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = True A = self.tokenizer( 'UN Chief says there is no military solution in Syria' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids ,[1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] ) A = False A = self.tokenizer( 'UN Chief says there is no military solution in Syria' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids ,[25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
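# Distilled from the integration tests above (requires sentencepiece and network access):
#
#   from transformers import NllbTokenizer
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#   )
#   batch = tokenizer(["UN Chief says there is no military solution in Syria"], return_tensors="pt")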
"""simple docstring""" from __future__ import annotations import requests def _snake_case ( snake_case__ : str ): A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(snake_case__ ).json() def _snake_case ( snake_case__ : int = 10 ): A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' A = requests.get(snake_case__ ).json()[:max_stories] return [get_hackernews_story(snake_case__ ) for story_id in story_ids] def _snake_case ( snake_case__ : int = 10 ): A = hackernews_top_stories(snake_case__ ) return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''OwlViTFeatureExtractor'''] _lowercase = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from string import ascii_uppercase _lowercase = {char: i for i, char in enumerate(ascii_uppercase)} _lowercase = dict(enumerate(ascii_uppercase)) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = len(snake_case__ ) A = 0 while True: if x == i: A = 0 if len(snake_case__ ) == len(snake_case__ ): break key += key[i] i += 1 return key def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in message: if letter == " ": cipher_text += " " else: A = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: A = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _snake_case ( ): A = 'THE GERMAN ATTACK' A = 'SECRET' A = generate_key(snake_case__ , snake_case__ ) A = cipher_text(snake_case__ , snake_case__ ) print(F'Encrypted Text = {s}' ) print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" def _snake_case ( snake_case__ : List[str] ): A = 1 A = 2 while i * i <= n: A = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def _snake_case ( ): A = 1 A = 1 while True: i += 1 t_num += i if count_divisors(snake_case__ ) > 500: break return t_num if __name__ == "__main__": print(solution())
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): A = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = 'sgugger/tiny-distilbert-classification' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) # set architectures equal to `None` A = None A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ 
,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'sshleifer/tinier_bart' A = AutoConfig.from_pretrained(A_ ) A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) benchmark.run() self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: A = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(A_ : Optional[int] ): self.assertTrue(hasattr(A_ ,'sequential' ) ) self.assertTrue(hasattr(A_ ,'cumulative' ) ) self.assertTrue(hasattr(A_ ,'current' ) ) self.assertTrue(hasattr(A_ ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ 
,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() )
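# The pattern these tests exercise, as a standalone sketch (model name taken from the
# tests above; requires torch and network access):
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#   benchmark_args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(benchmark_args).run()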
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowercase = { '''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''}, '''tokenizer_file''': { '''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json''' }, } _lowercase = {'''mobilebert-uncased''': 5_12} _lowercase = {} class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: List[str] = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: List[str] = MobileBertTokenizer def __init__( self : Optional[int] ,A_ : Union[str, Any]=None ,A_ : Dict=None ,A_ : Dict=True ,A_ : Dict="[UNK]" ,A_ : Any="[SEP]" ,A_ : str="[PAD]" ,A_ : Optional[Any]="[CLS]" ,A_ : str="[MASK]" ,A_ : int=True ,A_ : Dict=None ,**A_ : Any ,) -> Dict: super().__init__( A_ ,tokenizer_file=A_ ,do_lower_case=A_ ,unk_token=A_ ,sep_token=A_ ,pad_token=A_ ,cls_token=A_ ,mask_token=A_ ,tokenize_chinese_chars=A_ ,strip_accents=A_ ,**A_ ,) A = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,A_ ) != do_lower_case or normalizer_state.get('strip_accents' ,A_ ) != strip_accents or normalizer_state.get('handle_chinese_chars' ,A_ ) != tokenize_chinese_chars ): A = getattr(A_ ,normalizer_state.pop('type' ) ) A = do_lower_case A = strip_accents A = tokenize_chinese_chars A = normalizer_class(**A_ ) A = do_lower_case def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Any=None ) -> Optional[Any]: A = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: A = self._tokenizer.model.save(A_ ,name=A_ ) return tuple(A_ )
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: A , A , A = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[int] ) -> Dict: return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return self.major, self.minor, self.patch def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]: if isinstance(A_ ,A_ ): return Version(A_ ) elif isinstance(A_ ,A_ ): return other raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' ) def __eq__( self : List[Any] ,A_ : Dict ) -> Any: try: A = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple: A = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : Union[str, Any] ) -> Union[str, Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]: A = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.version_str def _snake_case ( snake_case__ : List[str] ): A = _VERSION_REG.match(snake_case__ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def _snake_case ( snake_case__ : str ): return ".".join(str(snake_case__ ) for v in version_tuple )
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml _lowercase = NewType('''DataClass''', Any) _lowercase = NewType('''DataClassType''', Any) def _snake_case ( snake_case__ : Tuple ): if isinstance(snake_case__ , snake_case__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def _snake_case ( snake_case__ : list ): A = {str(snake_case__ ): choice for choice in choices} return lambda snake_case__ : str_to_choice.get(snake_case__ , snake_case__ ) def _snake_case ( *, snake_case__ : Union[str, List[str]] = None , snake_case__ : str = None , snake_case__ : Any = dataclasses.MISSING , snake_case__ : Callable[[], Any] = dataclasses.MISSING , snake_case__ : dict = None , **snake_case__ : Any , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls A = {} if aliases is not None: A = aliases if help is not None: A = help return dataclasses.field(metadata=snake_case__ , default=snake_case__ , default_factory=snake_case__ , **snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Iterable[DataClassType] def __init__( self : List[str] ,A_ : Union[DataClassType, Iterable[DataClassType]] ,**A_ : Any ) -> Optional[int]: # To make the default appear when using --help if "formatter_class" not in kwargs: A = ArgumentDefaultsHelpFormatter super().__init__(**A_ ) if dataclasses.is_dataclass(A_ ): A = [dataclass_types] A = list(A_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ,A_ : dataclasses.Field ) -> Optional[Any]: A = F'--{field.name}' A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A_ ): raise RuntimeError( 'Unresolved type detected, which should have been done with the help of ' '`typing.get_type_hints` method by default' ) A = kwargs.pop('aliases' ,[] ) if isinstance(A_ ,A_ ): A = [aliases] A = getattr(field.type ,'__origin__' ,field.type ) if origin_type is Union or (hasattr(A_ ,'UnionType' ) and isinstance(A_ ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A_ ) not in field.type.__args__ ): raise ValueError( 'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because' ' the argument parser only supports one type per argument.' F' Problem encountered in field \'{field.name}\'.' 
) if type(A_ ) not in field.type.__args__: # filter `str` in Union A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] A = getattr(field.type ,'__origin__' ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) A = ( field.type.__args__[0] if isinstance(A_ ,field.type.__args__[1] ) else field.type.__args__[1] ) A = getattr(field.type ,'__origin__' ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) A = {} if origin_type is Literal or (isinstance(field.type ,A_ ) and issubclass(field.type ,A_ )): if origin_type is Literal: A = field.type.__args__ else: A = [x.value for x in field.type] A = make_choice_type_function(kwargs['choices'] ) if field.default is not dataclasses.MISSING: A = field.default else: A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument A = copy(A_ ) # Hack because type=bool in argparse does not behave as we want. A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way A = default # This tells argparse we accept 0 or 1 value after --field_name A = '?' # This is the value that will get picked if we do --field_name (without value) A = True elif isclass(A_ ) and issubclass(A_ ,A_ ): A = field.type.__args__[0] A = '+' if field.default_factory is not dataclasses.MISSING: A = field.default_factory() elif field.default is dataclasses.MISSING: A = True else: A = field.type if field.default is not dataclasses.MISSING: A = field.default elif field.default_factory is not dataclasses.MISSING: A = field.default_factory() else: A = True parser.add_argument(A_ ,*A_ ,**A_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): A = False parser.add_argument(F'--no_{field.name}' ,action='store_false' ,dest=field.name ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DataClassType ) -> List[Any]: if hasattr(A_ ,'_argument_group_name' ): A = self.add_argument_group(dtype._argument_group_name ) else: A = self try: A = get_type_hints(A_ ) except NameError: raise RuntimeError( F'Type resolution failed for {dtype}. Try declaring the class in global scope or ' 'removing line of `from __future__ import annotations` which opts in Postponed ' 'Evaluation of Annotations (PEP 563)' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A_ ): A = '.'.join(map(A_ ,sys.version_info[:3] ) ) raise RuntimeError( F'Type resolution failed for {dtype} on Python {python_version}. 
Try removing ' 'line of `from __future__ import annotations` which opts in union types as ' '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ' 'support Python versions that lower than 3.10, you need to use ' '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ' '`X | None`.' ) from ex raise for field in dataclasses.fields(A_ ): if not field.init: continue A = type_hints[field.name] self._parse_dataclass_field(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any=None ,A_ : int=False ,A_ : Any=True ,A_ : List[str]=None ,A_ : Union[str, Any]=None ,) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): A = [] if args_filename: args_files.append(Path(A_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values A = ArgumentParser() args_file_parser.add_argument(A_ ,type=A_ ,action='append' ) # Use only remaining args for further parsing (remove the args_file_flag) A , A = args_file_parser.parse_known_args(args=A_ ) A = vars(A_ ).get(args_file_flag.lstrip('-' ) ,A_ ) if cmd_args_file_paths: args_files.extend([Path(A_ ) for p in cmd_args_file_paths] ) A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last A = file_args + args if args is not None else file_args + sys.argv[1:] A , A = self.parse_known_args(args=A_ ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in vars(A_ ).items() if k in keys} for k in keys: delattr(A_ ,A_ ) A = dtype(**A_ ) outputs.append(A_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' ) return (*outputs,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict[str, Any] ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = set(args.keys() ) A = [] for dtype in self.dataclass_types: A = {f.name for f in dataclasses.fields(A_ ) if f.init} A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) A = dtype(**A_ ) outputs.append(A_ ) if not allow_extra_keys and unused_keys: raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(A_ )}' ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: with open(Path(A_ ) ,encoding='utf-8' ) as open_json_file: A = json.loads(open_json_file.read() ) A = self.parse_dict(A_ ,allow_extra_keys=A_ ) return tuple(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]: A = self.parse_dict(yaml.safe_load(Path(A_ ).read_text() ) ,allow_extra_keys=A_ ) return tuple(A_ )
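# Usage sketch for the dataclass-driven argument parser defined above (upstream
# this class is `transformers.HfArgumentParser`; the `TrainingConfig` fields and
# the CLI flags in the comments are illustrative assumptions):
from dataclasses import dataclass, field as dc_field

from transformers import HfArgumentParser


@dataclass
class TrainingConfig:
    learning_rate: float = dc_field(default=5e-5, metadata={'help': 'Optimizer learning rate.'})
    num_epochs: int = 3
    fp16: bool = False  # passing --fp16 with no value sets this to True (nargs='?')


if __name__ == '__main__':
    # e.g. `python train.py --learning_rate 3e-5 --fp16`
    (config,) = HfArgumentParser(TrainingConfig).parse_args_into_dataclasses()
    print(config)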
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def _a ( a :str ) -> Dict: a = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a = 128 elif "12-12" in model_name: a = 12 a = 12 elif "14-14" in model_name: a = 14 a = 14 elif "16-16" in model_name: a = 16 a = 16 else: raise ValueError('''Model not supported''' ) a = '''huggingface/label-files''' if "speech-commands" in model_name: a = 35 a = '''speech-commands-v2-id2label.json''' else: a = 527 a = '''audioset-id2label.json''' a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) a = {int(a ): v for k, v in idalabel.items()} a = idalabel a = {v: k for k, v in idalabel.items()} return config def _a ( a :Optional[Any] ) -> Union[str, Any]: if "module.v" in name: a = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: a = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: a = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: a = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: a = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: a = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: a = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: a = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: a = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: a = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: a = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: a = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: a = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: a = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def _a ( a :Optional[int] , a :List[str] ) -> Any: for key in orig_state_dict.copy().keys(): a = orig_state_dict.pop(a ) if "qkv" in key: a = key.split('''.''' ) a = int(key_split[3] ) a = config.hidden_size if "weight" in key: a = val[:dim, :] a = val[dim : dim * 2, :] a = val[-dim:, :] else: a = val[:dim] a = val[dim : dim * 2] a = val[-dim:] else: a = val return orig_state_dict def _a ( a :Union[str, Any] ) -> Optional[Any]: a = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(a , a ) @torch.no_grad() def _a ( a :Union[str, Any] , a :Optional[int] , a :str=False ) -> Union[str, Any]: a = get_audio_spectrogram_transformer_config(a ) a = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' 
), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict a = model_name_to_url[model_name] a = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' ) # remove some keys remove_keys(a ) # rename some keys a = convert_state_dict(a , a ) # load 🤗 model a = ASTForAudioClassification(a ) model.eval() model.load_state_dict(a ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a = -4.2_677_393 if '''speech-commands''' not in model_name else -6.845_978 a = 4.5_689_974 if '''speech-commands''' not in model_name else 5.5_654_526 a = 1_024 if '''speech-commands''' not in model_name else 128 a = ASTFeatureExtractor(mean=a , std=a , max_length=a ) if "speech-commands" in model_name: a = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) a = dataset[0]['''audio''']['''array'''] else: a = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) a , a = torchaudio.load(a ) a = waveform.squeeze().numpy() a = feature_extractor(a , sampling_rate=16_000 , return_tensors='''pt''' ) # forward pass a = model(**a ) a = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a = torch.tensor([-1.1_986, -7.0_903, -8.2_718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a = torch.tensor([-2.6_128, -8.0_080, -9.4_344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a = torch.tensor([-1.5_080, -7.4_534, -8.8_917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a = torch.tensor([-0.5_050, -6.5_833, -8.0_843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a = torch.tensor([-0.3_826, -7.0_336, -8.2_413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a = torch.tensor([-1.2_113, -6.9_101, -8.3_470] ) elif model_name == "ast-finetuned-speech-commands-v2": a = torch.tensor([6.1_589, -8.0_566, -8.7_984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , a , atol=1e-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(a ).mkdir(exist_ok=a ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a ) print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(a ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F"""MIT/{model_name}""" ) feature_extractor.push_to_hub(F"""MIT/{model_name}""" ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters 
parser.add_argument( "--model_name", default="ast-finetuned-audioset-10-10-0.4593", type=str, help="Name of the Audio Spectrogram Transformer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCAmelCase__ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
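# Framework-free sketch of the fused-QKV split performed by convert_state_dict
# above: a weight with 3*dim rows is cut into query/key/value blocks of dim rows
# each, mirroring the `val[:dim]`, `val[dim : dim * 2]`, `val[-dim:]` slicing.
def split_qkv(rows, dim):
    """rows is a sequence of length 3*dim; returns (query, key, value) slices."""
    return rows[:dim], rows[dim:dim * 2], rows[-dim:]


q, k, v = split_qkv(list(range(9)), dim=3)
assert (q, k, v) == ([0, 1, 2], [3, 4, 5], [6, 7, 8])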
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase = 16 _lowercase = 32 def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ): A = AutoTokenizer.from_pretrained(snake_case__ ) A = load_dataset('glue' , 'mrpc' ) def tokenize_function(snake_case__ : Dict ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. A = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[int] ): # Initialize accelerator A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config['lr'] A = int(config['num_epochs'] ) A = int(config['seed'] ) A = int(config['batch_size'] ) A = args.model_name_or_path set_seed(snake_case__ ) A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer A = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A = optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: A = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: A = 1 A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in 
the same order we gave them to the # prepare method. A , A , A , A , A = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over A = 0 # We also need to keep track of the stating epoch so files are named properly A = 0 # Now we train the model A = evaluate.load('glue' , 'mrpc' ) A = 0 A = {} for epoch in range(snake_case__ , snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): A = model(**snake_case__ ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A = 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A , A = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case__ ) - 1: A = predictions[: len(eval_dataloader.dataset ) - samples_seen] A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , snake_case__ ) A = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: A = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(snake_case__ , snake_case__ ) def _snake_case ( ): A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , ) parser.add_argument( '--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , ) A = parser.parse_args() A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
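# Minimal illustration of the loss scaling used in the training loop above:
# dividing each micro-batch loss by gradient_accumulation_steps makes the
# accumulated gradient equal to one averaged large-batch step (a framework-free
# sketch; the helper name is an assumption).
def accumulated_loss(micro_batch_losses, accumulation_steps):
    return sum(loss / accumulation_steps for loss in micro_batch_losses)


assert accumulated_loss([1.0, 2.0, 3.0, 2.0], accumulation_steps=4) == 2.0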
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE_: Union[str, Any] =get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Optional[Any] = XLMProphetNetTokenizer a__ : Dict = False a__ : List[Any] = True def _lowercase (self : str ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ = XLMProphetNetTokenizer(__a , keep_accents=__a ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = "[PAD]" UpperCAmelCase_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def _lowercase (self : List[str] ): UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "[PAD]" ) self.assertEqual(vocab_keys[1] , "[CLS]" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__a ) , 1012 ) def _lowercase (self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def _lowercase (self : Tuple ): UpperCAmelCase_ = XLMProphetNetTokenizer(__a , keep_accents=__a ) UpperCAmelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual( __a , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def _lowercase (self : Optional[int] ): return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" ) @slow def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = "Hello World!" 
UpperCAmelCase_ = [35389, 6672, 49, 2] self.assertListEqual(__a , self.big_tokenizer.encode(__a ) ) @slow def _lowercase (self : int ): # fmt: off UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
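# Framework-free sketch of the SentencePiece convention the tokenizer test
# above exercises: U+2581 ("▁") marks word starts, so detokenization is a plain
# join-and-replace.
SPIECE_UNDERLINE = '\u2581'
tokens = [SPIECE_UNDERLINE + 'This', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'a', SPIECE_UNDERLINE + 't', 'est']
detokenized = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').lstrip()
assert detokenized == 'This is a test'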
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : int ,A_ : Dict ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : 
Union[str, Any] ,A_ : int ,A_ : str ,) -> Any: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ,A_ : str ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : str ,A_ : Any ,A_ : str ,A_ : Dict ,) -> Dict: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Any ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,) -> int: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Optional[int] ,A_ : Any ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ,) -> List[Any]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ,A_ : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> Optional[int]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int] ,) -> List[str]: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : List[Any] ,) -> List[str]: A = 
self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[int] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ) -> Any: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[int] ,A_ : List[Any]=False ) -> int: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Any ,A_ : str ,A_ : Tuple ,A_ : Any ,A_ : Any=False ,A_ : Any=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ,A_ : int ,A_ : Any ,A_ : str=False ,A_ : Any=1 ) -> Tuple: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
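# Sketch of the input expansion used by the multiple-choice check above: each
# (batch, seq) tensor is tiled to (batch, num_choices, seq) so every answer
# choice shares the same context (assumes torch is installed; sizes are
# illustrative).
import torch

batch_size, seq_length, num_choices = 2, 5, 4
input_ids = torch.randint(0, 99, (batch_size, seq_length))
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch_size, num_choices, seq_length)
assert torch.equal(expanded[:, 0], input_ids)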
'''simple docstring'''


def cramers_rule_2x2(
    equation1: tuple[float, float, float], equation2: tuple[float, float, float]
) -> tuple[float, float]:
    """Solve a1*x + b1*y = c1 and a2*x + b2*y = c2 with Cramer's rule."""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        raise ValueError('No solution. (Inconsistent system)')
    if determinant_x == determinant_y == 0:
        # Trivial solution: the consistent system with x = y = 0
        return (0.0, 0.0)
    # Non-trivial solution (consistent system)
    return (determinant_x / determinant, determinant_y / determinant)
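# Worked example for the solver above: x + 2y = 3 and 4x + 5y = 6 has the
# unique solution x = -1, y = 2 (determinant = -3, det_x = 3, det_y = -6).
if __name__ == '__main__':
    assert cramers_rule_2x2((1, 2, 3), (4, 5, 6)) == (-1.0, 2.0)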
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowercase = logging.get_logger(__name__) @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : int ,**A_ : Any ) -> Any: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A = deprecated_arg[3:] A = not kwargs.pop(A_ ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) A = kwargs.pop('tpu_name' ,self.tpu_name ) A = kwargs.pop('device_idx' ,self.device_idx ) A = kwargs.pop('eager_mode' ,self.eager_mode ) A = kwargs.pop('use_xla' ,self.use_xla ) super().__init__(**A_ ) _lowerCamelCase: str = field( default=_lowercase , metadata={'''help''': '''Name of TPU'''} , ) _lowerCamelCase: int = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Benchmark models in eager model.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self ,['tf'] ) A = None if self.tpu: try: if self.tpu_name: A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: A = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: A = None return tpu @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self ,['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) A = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' ) A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] ,'GPU' ) # disable GPU A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' ) return strategy @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool: requires_backends(self ,['tf'] ) return self._setup_tpu is not None @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> "tf.distribute.Strategy": requires_backends(self ,['tf'] ) return self._setup_strategy @property def _SCREAMING_SNAKE_CASE ( self : int ) -> str: requires_backends(self ,['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: requires_backends(self ,['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _SCREAMING_SNAKE_CASE ( self : str ) -> bool: return self.n_gpu > 0
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=33 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]: """simple docstring""" A : int = parent A : int = batch_size A : List[Any] = seq_length A : Union[str, Any] = is_training A : Optional[Any] = use_input_mask A : List[str] = use_token_type_ids A : str = use_labels A : Optional[Any] = vocab_size A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Any = num_attention_heads A : Dict = intermediate_size A : Tuple = hidden_act A : Optional[Any] = hidden_dropout_prob A : List[str] = attention_probs_dropout_prob A : Any = max_position_embeddings A : str = type_vocab_size A : Dict = type_sequence_label_size A : Tuple = initializer_range A : Dict = num_labels A : Optional[Any] = num_choices A : List[str] = scope def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A : Dict = None A : List[str] = None A : List[Any] = None if self.use_labels: A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) A : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> str: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Any = EsmModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , 
attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) A : List[str] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[str] = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = self.num_labels A : Union[str, Any] = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Optional[int] = config_and_inputs A : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = False __magic_name__ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) __magic_name__ = () __magic_name__ = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Any = EsmModelTester(self ) A : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A : Optional[Any] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Dict: """simple 
docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Any = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs()[0] A : Optional[Any] = EsmEmbeddings(config=SCREAMING_SNAKE_CASE ) A : Any = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) A : Any = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) A : int = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs()[0] A : int = EsmEmbeddings(config=SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.empty(2 , 4 , 30 ) A : Any = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] A : Any = torch.as_tensor([expected_single_positions, expected_single_positions] ) A : int = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ) @unittest.skip('''Esm does not support embedding resizing''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass @unittest.skip('''Esm does not support embedding resizing''' ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" pass @require_torch class A ( __snake_case ): @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with torch.no_grad(): A : List[str] = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) model.eval() A : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) A : Union[str, Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = 33 A : List[str] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Tuple = torch.tensor( [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" with torch.no_grad(): A : str = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) model.eval() A : Any = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A : int = model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. A : str = torch.tensor( [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _lowercase = logging.get_logger(__name__) _lowercase = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = '''dpt''' def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]: super().__init__(**A_ ) A = hidden_size A = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) A = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): logger.info('Initializing the config with a `BiT` backbone.' ) A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): A = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) A = backbone_featmap_shape A = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' ) else: A = None A = None A = [] A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) A = readout_type A = reassemble_factors A = neck_hidden_sizes A = fusion_hidden_size A = head_in_index A = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A = use_auxiliary_head A = auxiliary_loss_weight A = semantic_loss_ignore_index A = semantic_classifier_dropout def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: A = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A = self.backbone_config.to_dict() A = self.__class__.model_type return output
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Tuple=1_8 , UpperCAmelCase__ : Optional[int]=3_0 , UpperCAmelCase__ : Union[str, Any]=4_0_0 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=True , ) -> int: lowerCAmelCase = size if size is not None else {'height': 1_8, 'width': 1_8} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = apply_ocr def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): lowerCamelCase : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __UpperCAmelCase ( self : Tuple ) -> int: lowerCAmelCase = LayoutLMvaImageProcessingTester(self ) @property def __UpperCAmelCase ( self : Any ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : int ) -> Union[str, Any]: lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) ) def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} ) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} ) def __UpperCAmelCase ( self : Dict ) -> str: pass def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , UpperCAmelCase__ ) self.assertIsInstance(encoding.boxes , UpperCAmelCase__ ) # Test batched lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], 
self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self : Optional[int] ) -> Dict: # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self : List[Any] ) -> Any: # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self : str ) -> Optional[int]: # with apply_OCR = True lowerCAmelCase = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCAmelCase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) lowerCAmelCase = Image.open(ds[0]['file'] ).convert('RGB' ) lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCAmelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 
'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 lowerCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 
5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCAmelCase__ ) self.assertListEqual(encoding.boxes , UpperCAmelCase__ ) # with apply_OCR = False lowerCAmelCase = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
"""simple docstring""" from __future__ import annotations import math _lowercase = '''2020.9.26''' _lowercase = '''xcodz-dot, cclaus, dhruvmanila''' def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ): A = F'Input values must either be float or int: {list(locals().values() )}' raise TypeError(snake_case__ ) A = ((x * distance) / (z + distance)) * scale A = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ): if not isinstance(snake_case__ , snake_case__ ): raise TypeError('Axis must be a str' ) A = locals() del input_variables["axis"] if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ): A = ( 'Input values except axis must either be float or int: ' F'{list(input_variables.values() )}' ) raise TypeError(snake_case__ ) A = (angle % 360) / 450 * 180 / math.pi if axis == "z": A = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ ) A = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = z elif axis == "x": A = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ ) A = x elif axis == "y": A = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = y else: raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""") print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=None ) -> Tuple: """simple docstring""" assert torch_layer.weight.shape == weight.shape, F"{torch_layer} layer.weight does not match" _lowercase =nn.Parameter(__snake_case ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"{torch_layer} layer.bias does not match" _lowercase =nn.Parameter(__snake_case ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Union[str, Any]: """simple docstring""" _lowercase =np.asarray(weights[0] ) _lowercase =np.asarray(weights[1] ) _lowercase =np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(__snake_case ).transpose(1 , 2 ).contiguous().view(-1 , __snake_case ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__snake_case ).transpose(1 , 2 ).contiguous().view(-1 , __snake_case ) , ) set_param( torch_layer.output.dense , torch.tensor(__snake_case ).view(-1 , __snake_case ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> int: """simple docstring""" _lowercase =np.asarray(weights[0] ) _lowercase =np.asarray(weights[1] ) _lowercase =np.asarray(weights[2] ) _lowercase =np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(__snake_case ).transpose(1 , 2 ).contiguous().view(-1 , __snake_case ) , ) set_param( torch_layer.self_attention.key , torch.tensor(__snake_case ).transpose(1 , 2 ).contiguous().view(-1 , __snake_case ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__snake_case ).transpose(1 , 2 ).contiguous().view(-1 , __snake_case ) , ) set_param( torch_layer.output.dense , torch.tensor(__snake_case ).view(-1 , __snake_case ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Optional[Any]: """simple docstring""" _lowercase =weights[0][0][0] _lowercase =np.asarray(layer_norm_a[0] ) _lowercase =np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(__snake_case ) , torch.tensor(__snake_case ) , ) # lsh weights + output _lowercase =weights[0][1] if len(__snake_case ) < 4: set_layer_weights_in_torch_lsh(__snake_case , torch_block.attention , __snake_case ) else: set_layer_weights_in_torch_local(__snake_case , torch_block.attention , __snake_case ) # intermediate weighs _lowercase =weights[2][0][1][2] # Chunked Feed Forward if len(__snake_case ) == 4: _lowercase =intermediate_weights[2] # layernorm 2 _lowercase =np.asarray(intermediate_weights[0][0] ) _lowercase =np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(__snake_case ) , torch.tensor(__snake_case ) , ) # intermediate dense _lowercase =np.asarray(intermediate_weights[1][0] ) _lowercase =np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(__snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(__snake_case ) , ) # intermediate out _lowercase =np.asarray(intermediate_weights[4][0] ) _lowercase =np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(__snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(__snake_case ) , ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> List[Any]: 
"""simple docstring""" _lowercase =torch_model.reformer # word embeds _lowercase =np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(__snake_case ) , ) if isinstance(weights[3] , __snake_case ): _lowercase =torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): _lowercase =np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"{position_embeddings[emb_idx]} emb does not match" _lowercase =nn.Parameter(torch.tensor(__snake_case ) ) _lowercase =weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( __snake_case ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): _lowercase =trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(__snake_case , __snake_case , __snake_case ) # output layer norm _lowercase =np.asarray(weights[7][0] ) _lowercase =np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(__snake_case ) , torch.tensor(__snake_case ) , ) # output embeddings _lowercase =np.asarray(weights[9][0] ) _lowercase =np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(__snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(__snake_case ) , ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Any: """simple docstring""" _lowercase =ReformerConfig.from_json_file(__snake_case ) print(F"Building PyTorch model from configuration: {config}" ) _lowercase =ReformerModelWithLMHead(__snake_case ) with open(__snake_case , '''rb''' ) as f: _lowercase =pickle.load(__snake_case )['''weights'''] set_model_weights_in_torch(__snake_case , __snake_case , config.hidden_size ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCAmelCase__ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ,A_ : int ) -> Union[str, Any]: A = n A = [None] * self.n A = 0 # index of the first element A = 0 A = 0 def __len__( self : int ) -> int: return self.size def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.size == 0 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: return False if self.is_empty() else self.array[self.front] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int: if self.size >= self.n: raise Exception('QUEUE IS FULL' ) A = data A = (self.rear + 1) % self.n self.size += 1 return self def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: if self.size == 0: raise Exception('UNDERFLOW' ) A = self.array[self.front] A = None A = (self.front + 1) % self.n self.size -= 1 return temp
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # A square is safe if no queen shares its row, column, or upper diagonals.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    # Backtracking: place one queen per row, undoing placements that dead-end.
    if row >= len(board):
        solution.append([r[:] for r in board])  # store a copy so backtracking cannot erase it
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            print("Q" if board[i][j] == 1 else ".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
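To reuse the solver above for another board size, clear the module-level solution list and call solve again; a 4 x 4 board has exactly two valid placements:

solution.clear()  # reset the module-level accumulator before reuse
n = 4
board = [[0 for _ in range(n)] for _ in range(n)]
solve(board, 0)   # prints each placement as it is found
print("solutions for n = 4:", len(solution))  # 2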
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class A ( unittest.TestCase ): """simple docstring""" lowerCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def snake_case__ ( self : List[Any],lowercase_ : Union[str, Any],lowercase_ : int,lowercase_ : Dict )-> Any: '''simple docstring''' A__ = TextaTextGenerationPipeline(model=lowercase_,tokenizer=lowercase_ ) return generator, ["Something to write", "Something else"] def snake_case__ ( self : int,lowercase_ : List[str],lowercase_ : List[Any] )-> Optional[int]: '''simple docstring''' A__ = generator('Something there' ) self.assertEqual(lowercase_,[{'generated_text': ANY(lowercase_ )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) A__ = generator(['This is great !', 'Something else'],num_return_sequences=2,do_sample=lowercase_ ) self.assertEqual( lowercase_,[ [{'generated_text': ANY(lowercase_ )}, {'generated_text': ANY(lowercase_ )}], [{'generated_text': ANY(lowercase_ )}, {'generated_text': ANY(lowercase_ )}], ],) A__ = generator( ['This is great !', 'Something else'],num_return_sequences=2,batch_size=2,do_sample=lowercase_ ) self.assertEqual( lowercase_,[ [{'generated_text': ANY(lowercase_ )}, {'generated_text': ANY(lowercase_ )}], [{'generated_text': ANY(lowercase_ )}, {'generated_text': ANY(lowercase_ )}], ],) with self.assertRaises(lowercase_ ): generator(4 ) @require_torch def snake_case__ ( self : Tuple )-> List[Any]: '''simple docstring''' A__ = pipeline('text2text-generation',model='patrickvonplaten/t5-tiny-random',framework='pt' ) # do_sample=False necessary for reproducibility A__ = generator('Something there',do_sample=lowercase_ ) self.assertEqual(lowercase_,[{'generated_text': ''}] ) A__ = 3 A__ = generator( 'Something there',num_return_sequences=lowercase_,num_beams=lowercase_,) A__ = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(lowercase_,lowercase_ ) A__ = generator('This is a test',do_sample=lowercase_,num_return_sequences=2,return_tensors=lowercase_ ) self.assertEqual( lowercase_,[ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ],) A__ = generator.model.config.eos_token_id A__ = '<pad>' A__ = generator( ['This is a test', 'This is a second test'],do_sample=lowercase_,num_return_sequences=2,batch_size=2,return_tensors=lowercase_,) self.assertEqual( lowercase_,[ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ],) @require_tf def snake_case__ ( self : List[str] )-> List[str]: '''simple docstring''' A__ = pipeline('text2text-generation',model='patrickvonplaten/t5-tiny-random',framework='tf' ) # do_sample=False necessary for reproducibility A__ = generator('Something there',do_sample=lowercase_ ) self.assertEqual(lowercase_,[{'generated_text': ''}] )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''gpt_bigcode''' _lowerCamelCase: List[Any] = ['''past_key_values'''] _lowerCamelCase: int = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Optional[int] ,A_ : Dict=5_0257 ,A_ : Union[str, Any]=1024 ,A_ : str=768 ,A_ : Any=12 ,A_ : Any=12 ,A_ : Optional[int]=None ,A_ : Any="gelu_pytorch_tanh" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : List[str]=0.1 ,A_ : Tuple=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[str]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=5_0256 ,A_ : Union[str, Any]=5_0256 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : Dict=True ,**A_ : Union[str, Any] ,) -> Union[str, Any]: A = vocab_size A = n_positions A = n_embd A = n_layer A = n_head A = n_inner A = activation_function A = resid_pdrop A = embd_pdrop A = attn_pdrop A = layer_norm_epsilon A = initializer_range A = scale_attn_weights A = use_cache A = attention_softmax_in_fpaa A = scale_attention_softmax_in_fpaa A = multi_query A = bos_token_id A = eos_token_id super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = Dict[str, Any] lowerCAmelCase_ = List[Prediction] @add_end_docstrings(__A ) class snake_case_ ( __A ): '''simple docstring''' def __init__( self : str , *_UpperCamelCase : Tuple , **_UpperCamelCase : Optional[Any] ) ->int: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def snake_case__( self : Optional[Any] , **_UpperCamelCase : Any ) ->Optional[int]: snake_case_ = {} if "threshold" in kwargs: snake_case_ = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : str , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Tuple ) ->Union[Predictions, List[Prediction]]: return super().__call__(*_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Any , _UpperCamelCase : str ) ->Union[str, Any]: snake_case_ = load_image(_UpperCamelCase ) snake_case_ = torch.IntTensor([[image.height, image.width]] ) snake_case_ = self.image_processor(images=[image] , return_tensors='''pt''' ) if self.tokenizer is not None: snake_case_ = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' ) snake_case_ = target_size return inputs def snake_case__( self : int , _UpperCamelCase : List[str] ) ->List[Any]: snake_case_ = model_inputs.pop('''target_size''' ) snake_case_ = self.model(**_UpperCamelCase ) snake_case_ = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: snake_case_ = model_inputs['''bbox'''] return model_outputs def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str]=0.9 ) ->List[Any]: snake_case_ = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
snake_case_, snake_case_ = target_size[0].tolist() def unnormalize(_UpperCamelCase : int ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_0_0_0), (height * bbox[1] / 1_0_0_0), (width * bbox[2] / 1_0_0_0), (height * bbox[3] / 1_0_0_0), ] ) ) snake_case_, snake_case_ = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) snake_case_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] snake_case_ = [unnormalize(_UpperCamelCase ) for bbox in model_outputs['''bbox'''].squeeze(0 )] snake_case_ = ['''score''', '''label''', '''box'''] snake_case_ = [dict(zip(_UpperCamelCase , _UpperCamelCase ) ) for vals in zip(scores.tolist() , _UpperCamelCase , _UpperCamelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel snake_case_ = self.image_processor.post_process_object_detection(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ = raw_annotations[0] snake_case_ = raw_annotation['''scores'''] snake_case_ = raw_annotation['''labels'''] snake_case_ = raw_annotation['''boxes'''] snake_case_ = scores.tolist() snake_case_ = [self.model.config.idalabel[label.item()] for label in labels] snake_case_ = [self._get_bounding_box(_UpperCamelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] snake_case_ = ['''score''', '''label''', '''box'''] snake_case_ = [ dict(zip(_UpperCamelCase , _UpperCamelCase ) ) for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] ) ] return annotation def snake_case__( self : List[Any] , _UpperCamelCase : "torch.Tensor" ) ->Dict[str, int]: if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) snake_case_, snake_case_, snake_case_, snake_case_ = box.int().tolist() snake_case_ = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
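The unnormalize closure in the object-detection pipeline above applies the LayoutLM box convention: coordinates live on a 0-1000 grid and are scaled back by the page's pixel size. A standalone sketch of the same arithmetic (the function name and the example page size are my own choices):

def unnormalize_box(bbox: list[int], width: int, height: int) -> list[float]:
    # LayoutLM-style boxes are normalized to a 0-1000 grid;
    # scale each coordinate back to pixel space.
    x0, y0, x1, y1 = bbox
    return [width * x0 / 1000, height * y0 / 1000, width * x1 / 1000, height * y1 / 1000]

print(unnormalize_box([141, 57, 214, 69], width=762, height=1000))
# [107.442, 57.0, 163.068, 69.0]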
"""simple docstring""" import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _lowercase = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) _lowercase = '''sshleifer/student_marian_en_ro_6_1''' _lowercase = '''sshleifer/tiny-mbart''' @require_torch class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple: A = self.run_trainer( eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,) A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history if not do_eval: return A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] ,A_ ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: self.run_seqaseq_quick() @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : int ) -> int: self.run_seqaseq_quick(distributed=A_ ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: self.run_seqaseq_quick( distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ ) @require_apex @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' ) @parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } A = experiments[experiment_id] A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} A = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] ) A = len(re.findall(A_ ,cl.err ) ) self.assertEqual(A_ ,data['n_matches'] ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = self.run_trainer( eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,) # Check metrics A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history A = [log for log in logs if 'eval_loss' in log.keys()] A = eval_metrics[0] A = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] ,A_ ) # test if do_predict saves generations and metrics A = os.listdir(A_ ) A = {os.path.basename(A_ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: from transformers.training_args import OptimizerNames def train_and_return_metrics(A_ : str ) -> Tuple[int, float]: A = '--skip_memory_metrics 0' A = self.run_trainer( max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,) # Check metrics A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) A = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A = 
train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A = gpu_peak_mem_orig + gpu_alloc_mem_orig A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and' F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,) self.assertGreater( A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and' F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,) self.assertEqual( A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict: A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' A = self.get_auto_remove_tmp_dir() A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split() A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split() A = '\n --do_predict\n '.split() A = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'--optim {optim}'.split() if extra_args_str is not None: args += 
extra_args_str.split() if distributed: if n_gpus_to_use is None: A = get_gpu_count() A = get_torch_dist_unique_port() A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split() A = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A_ ,env=self.get_env() ) else: A = ['run_translation.py'] + args with patch.object(A_ ,'argv' ,A_ ): main() return output_dir
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) __lowerCAmelCase : List[Any] ={ 'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json', } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = '''blip_2_vision_model''' def __init__( self :Dict , lowerCAmelCase__ :int=1_408 , lowerCAmelCase__ :str=6_144 , lowerCAmelCase__ :Union[str, Any]=39 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Any=224 , lowerCAmelCase__ :Optional[Any]=14 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Optional[int]=0.0_0001 , lowerCAmelCase__ :Union[str, Any]=0.0 , lowerCAmelCase__ :str=1E-1_0 , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :int , ) -> Union[str, Any]: super().__init__(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout __SCREAMING_SNAKE_CASE : Dict = layer_norm_eps __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act __SCREAMING_SNAKE_CASE : Optional[Any] = qkv_bias @classmethod def __magic_name__( cls :int , lowerCAmelCase__ :Union[str, os.PathLike] , **lowerCAmelCase__ :Optional[Any] ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": __SCREAMING_SNAKE_CASE : int = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = '''blip_2_qformer''' def __init__( self :List[str] , lowerCAmelCase__ :Optional[Any]=30_522 , lowerCAmelCase__ :Tuple=768 , lowerCAmelCase__ :List[Any]=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Dict=3_072 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :Tuple=512 , lowerCAmelCase__ :Tuple=0.02 , lowerCAmelCase__ :str=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Dict="absolute" , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :List[Any]=1_408 , **lowerCAmelCase__ :int , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : str = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = hidden_act __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps __SCREAMING_SNAKE_CASE : Optional[int] = position_embedding_type __SCREAMING_SNAKE_CASE : Tuple = cross_attention_frequency __SCREAMING_SNAKE_CASE : Optional[Any] = encoder_hidden_size @classmethod def __magic_name__( cls :Dict , lowerCAmelCase__ :Union[str, os.PathLike] , **lowerCAmelCase__ :Any ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": __SCREAMING_SNAKE_CASE : Dict = config_dict['''qformer_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = '''blip-2''' SCREAMING_SNAKE_CASE__ : Any = True def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[str]=32 , **lowerCAmelCase__ :Optional[int] ) -> Optional[int]: super().__init__(**lowerCAmelCase__ ) if vision_config is None: __SCREAMING_SNAKE_CASE : Optional[Any] = {} logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' ) if qformer_config is None: __SCREAMING_SNAKE_CASE : Optional[int] = {} logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' ) if text_config is None: __SCREAMING_SNAKE_CASE : Optional[int] = {} logger.info('''text_config is None. 
Initializing the text config with default values (`OPTConfig`).''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = BlipaVisionConfig(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = BlipaQFormerConfig(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt''' __SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = self.text_config.tie_word_embeddings __SCREAMING_SNAKE_CASE : Tuple = self.text_config.is_encoder_decoder __SCREAMING_SNAKE_CASE : Any = num_query_tokens __SCREAMING_SNAKE_CASE : int = self.vision_config.hidden_size __SCREAMING_SNAKE_CASE : str = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __SCREAMING_SNAKE_CASE : Optional[Any] = 1.0 __SCREAMING_SNAKE_CASE : List[Any] = 0.02 @classmethod def __magic_name__( cls :Union[str, Any] , lowerCAmelCase__ :BlipaVisionConfig , lowerCAmelCase__ :BlipaQFormerConfig , lowerCAmelCase__ :PretrainedConfig , **lowerCAmelCase__ :int , ) -> Optional[Any]: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , ) def __magic_name__( self :int ) -> Dict: __SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.vision_config.to_dict() __SCREAMING_SNAKE_CASE : List[str] = self.qformer_config.to_dict() __SCREAMING_SNAKE_CASE : Dict = self.text_config.to_dict() __SCREAMING_SNAKE_CASE : Tuple = self.__class__.model_type return output
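Composite configs like the BLIP-2 one above serialize by recursing into each sub-config, so to_dict returns plain JSON-ready data rather than nested objects. A minimal sketch of that pattern (simplified stand-ins, not the transformers classes):

import copy

class SubConfig:
    def __init__(self, hidden_size: int = 768) -> None:
        self.hidden_size = hidden_size

    def to_dict(self) -> dict:
        return copy.deepcopy(self.__dict__)

class ComposedConfig:
    model_type = "composed"

    def __init__(self, vision_config: SubConfig, text_config: SubConfig) -> None:
        self.vision_config = vision_config
        self.text_config = text_config

    def to_dict(self) -> dict:
        # Replace sub-config objects with their own serialized dicts.
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = type(self).model_type
        return output

cfg = ComposedConfig(SubConfig(1408), SubConfig(768))
print(cfg.to_dict())  # {'vision_config': {'hidden_size': 1408}, 'text_config': {...}, 'model_type': 'composed'}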
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''deit''' def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = encoder_stride class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: int = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float: return 1e-4
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """Count the square laminae that can be built from at most `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        # A lamina of width w and hole h uses w**2 - h**2 tiles, so the hole
        # must be wide enough to keep the lamina within the tile budget.
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Outer and hole widths must share parity for a uniform border.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
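A brute-force cross-check of the closed-form count above: enumerate outer and hole widths of matching parity directly and count the pairs that fit the tile budget. The Project Euler statement gives forty-one laminae for a budget of one hundred tiles, which this reproduces:

def count_laminae_bruteforce(limit: int) -> int:
    count = 0
    outer = 3
    # The thinnest lamina of width `outer` uses 4 * outer - 4 tiles.
    while outer * outer - (outer - 2) ** 2 <= limit:
        for hole in range(outer - 2, 0, -2):  # hole width keeps the outer width's parity
            if outer * outer - hole * hole <= limit:
                count += 1
        outer += 1
    return count

print(count_laminae_bruteforce(100))  # 41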
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ : Dict ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) A = [] for i in range(snake_case__ ): A = i / num_diffusion_timesteps A = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) ) return torch.tensor(snake_case__ , dtype=torch.floataa ) class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers] _lowerCamelCase: Optional[Any] = 2 @register_to_config def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]: if trained_betas is not None: A = torch.tensor(A_ ,dtype=torch.floataa ) elif beta_schedule == "linear": A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. A = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' ) elif beta_schedule == "exp": A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) A = 1.0 - self.betas A = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(A_ ,A_ ,A_ ) A = use_karras_sigmas def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple: if schedule_timesteps is None: A = self.timesteps A = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: A = 1 if len(A_ ) > 1 else 0 else: A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep A = self._index_counter[timestep_int] return indices[pos].item() @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor: A = self.index_for_timestep(A_ ) A = self.sigmas[step_index] A = sample / ((sigma**2 + 1) ** 0.5) return sample def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]: A = num_inference_steps A = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy() elif self.config.timestep_spacing == "leading": A = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": A = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ ) timesteps -= 1 else: raise ValueError( F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' 
) A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) A = np.log(A_ ) A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ ) if self.config.use_karras_sigmas: A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps ) A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] ) A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) A = torch.from_numpy(A_ ).to(device=A_ ) A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) A = torch.from_numpy(A_ ) A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(A_ ).startswith('mps' ): # mps does not support float64 A = timesteps.to(A_ ,dtype=torch.floataa ) else: A = timesteps.to(device=A_ ) # empty dt and derivative A = None A = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter A = defaultdict(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict: # get log sigma A = np.log(A_ ) # get distribution A = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) A = low_idx + 1 A = log_sigmas[low_idx] A = log_sigmas[high_idx] # interpolate sigmas A = (low - log_sigma) / (low - high) A = np.clip(A_ ,0 ,1 ) # transform interpolation to time range A = (1 - w) * low_idx + w * high_idx A = t.reshape(sigma.shape ) return t def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor: A = in_sigmas[-1].item() A = in_sigmas[0].item() A = 7.0 # 7.0 is the value used in the paper A = np.linspace(0 ,1 ,A_ ) A = sigma_min ** (1 / rho) A = sigma_max ** (1 / rho) A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.dt is None def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]: A = self.index_for_timestep(A_ ) # advance index counter by 1 A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: A = self.sigmas[step_index] A = self.sigmas[step_index + 1] else: # 2nd order / Heun's method A = self.sigmas[step_index - 1] A = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API A = 0 A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": A = sigma_hat if self.state_in_first_order else sigma_next A = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": A = sigma_hat if self.state_in_first_order else sigma_next A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": A = model_output else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.config.clip_sample: A = pred_original_sample.clamp( -self.config.clip_sample_range ,self.config.clip_sample_range ) if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order A = (sample - pred_original_sample) / sigma_hat # 3. delta timestep A = sigma_next - sigma_hat # store for 2nd order step A = derivative A = dt A = sample else: # 2. 2nd order / Heun's method A = (sample - pred_original_sample) / sigma_next A = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample A = self.dt A = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" A = None A = None A = None A = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A_ ): # mps does not support float64 A = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) A = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: A = self.timesteps.to(original_samples.device ) A = timesteps.to(original_samples.device ) A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps] A = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): A = sigma.unsqueeze(-1 ) A = original_samples + noise * sigma return noisy_samples def __len__( self : Dict ) -> int: return self.config.num_train_timesteps
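# A hedged usage sketch for the scheduler above. The class name was lost in
# this dump; its structure (2nd-order Heun step, `state_in_first_order`,
# optional Karras sigmas) matches diffusers' HeunDiscreteScheduler, which the
# sketch assumes. `model` stands in for any epsilon-predicting denoiser.
#
#   import torch
#   from diffusers import HeunDiscreteScheduler
#
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, use_karras_sigmas=True)
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)  # your denoising network
#       sample = scheduler.step(noise_pred, t, sample).prev_sample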
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: maximise profit under a weight limit."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for 1 kg of each item
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight reaches the max limit (e.g. 15 kg) and till i < length
    while limit <= max_weight and i < length:
        # greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so duplicates resolve to the next item

        # check if the whole item still fits within the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight:
            # 1 == weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # The item no longer fits whole, so take the remaining
            # capacity as a fraction: weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
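# A minimal worked example for calc_profit (the numbers are our own, not from
# the original file): ratios are 6.0, 5.0 and 4.0, so the greedy pass takes
# items 0 and 1 whole (30 kg, profit 160) and then 20/30 of item 2 (profit
# 80), for a total of 240.
assert round(calc_profit([60, 100, 120], [10, 20, 30], 50), 6) == 240.0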
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : list[int] ) -> None: A = len(A_ ) A = [0] * len_array if len_array > 0: A = array[0] for i in range(1 ,A_ ): A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool: A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(A_ ) return False if __name__ == "__main__": import doctest doctest.testmod()
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs UpperCAmelCase_ = imread(r'digital_image_processing/image_data/lena_small.jpg') UpperCAmelCase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = cn.convert_to_negative(A__ ) # assert negative_img array for at least one True assert negative_img.any() def lowerCamelCase__ ( ): '''simple docstring''' with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(A__ , 110 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() __lowerCamelCase = canny.canny(A__ ) # assert canny array for at least one True assert canny_array.any() def lowerCamelCase__ ( ): '''simple docstring''' assert gg.gaussian_filter(A__ , 5 , sigma=0.9 ).all() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __lowerCamelCase = conv.img_convolve(A__ , A__ ).astype(A__ ) assert res.any() def lowerCamelCase__ ( ): '''simple docstring''' assert med.median_filter(A__ , 3 ).any() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = sob.sobel_filter(A__ ) assert grad.any() and theta.any() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = sp.make_sepia(A__ , 20 ) assert sepia.all() def lowerCamelCase__ ( A__ : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' __lowerCamelCase = bs.Burkes(imread(A__ , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def lowerCamelCase__ ( A__ : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' __lowerCamelCase = rs.NearestNeighbour(imread(A__ , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
__lowerCamelCase = imread(A__ , 0 ) # Test for get_neighbors_pixel function() return not None __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = image[x_coordinate][y_coordinate] __lowerCamelCase = lbp.get_neighbors_pixel( A__ , A__ , A__ , A__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __lowerCamelCase = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __lowerCamelCase = lbp.local_binary_value(A__ , A__ , A__ ) assert lbp_image.any()
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = RobertaPreLayerNormConfig.from_pretrained( snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) ) A = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue A = tensor_value A = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) model.save_pretrained(snake_case__ ) # convert tokenizer A = AutoTokenizer.from_pretrained(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __lowercase : """simple docstring""" def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str]=14 , lowerCAmelCase__ : Dict=7 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : List[str]=99 , lowerCAmelCase__ : List[str]=32 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[int]=4 , lowerCAmelCase__ : Tuple=4 , lowerCAmelCase__ : Union[str, Any]=37 , lowerCAmelCase__ : Union[str, Any]="gelu" , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Union[str, Any]=512 , lowerCAmelCase__ : Tuple=0.02 , ): SCREAMING_SNAKE_CASE_: Optional[int] = parent SCREAMING_SNAKE_CASE_: Optional[int] = batch_size SCREAMING_SNAKE_CASE_: Dict = seq_length SCREAMING_SNAKE_CASE_: Union[str, Any] = is_training SCREAMING_SNAKE_CASE_: List[Any] = use_input_mask SCREAMING_SNAKE_CASE_: Tuple = use_token_type_ids SCREAMING_SNAKE_CASE_: Any = use_labels SCREAMING_SNAKE_CASE_: Optional[int] = vocab_size SCREAMING_SNAKE_CASE_: Optional[int] = hidden_size SCREAMING_SNAKE_CASE_: int = rotary_dim SCREAMING_SNAKE_CASE_: Optional[int] = num_hidden_layers SCREAMING_SNAKE_CASE_: Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE_: List[Any] = intermediate_size SCREAMING_SNAKE_CASE_: List[str] = hidden_act SCREAMING_SNAKE_CASE_: Any = hidden_dropout_prob SCREAMING_SNAKE_CASE_: Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_: Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE_: Optional[int] = initializer_range SCREAMING_SNAKE_CASE_: List[str] = None SCREAMING_SNAKE_CASE_: Tuple = vocab_size - 1 SCREAMING_SNAKE_CASE_: Any = vocab_size - 1 SCREAMING_SNAKE_CASE_: Dict = vocab_size - 1 def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_: Any = None if self.use_input_mask: SCREAMING_SNAKE_CASE_: Tuple = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_: List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = config_and_inputs SCREAMING_SNAKE_CASE_: List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict 
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: Union[str, Any] = 20 SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class_name(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = model.init_cache(input_ids.shape[0] , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4") SCREAMING_SNAKE_CASE_: Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) SCREAMING_SNAKE_CASE_: int = model( input_ids[:, :-1] , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4") SCREAMING_SNAKE_CASE_: Union[str, Any] = model( input_ids[:, -1:] , attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Union[str, Any] = model(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}") def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]): SCREAMING_SNAKE_CASE_: Optional[int] = 20 SCREAMING_SNAKE_CASE_: List[Any] = model_class_name(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) SCREAMING_SNAKE_CASE_: Optional[int] = model.init_cache(input_ids.shape[0] , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) SCREAMING_SNAKE_CASE_: Dict = model( input_ids[:, :-1] , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4") SCREAMING_SNAKE_CASE_: Optional[Any] = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}") @require_flax class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : int = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () _UpperCAmelCase : Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Dict = FlaxGPTJModelTester(self) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): for model_class_name in self.all_model_classes: 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) @tooslow def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Any = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left") SCREAMING_SNAKE_CASE_: Any = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") SCREAMING_SNAKE_CASE_: Optional[int] = False SCREAMING_SNAKE_CASE_: int = model.config.eos_token_id SCREAMING_SNAKE_CASE_: Union[str, Any] = jax.jit(model.generate) SCREAMING_SNAKE_CASE_: int = jit_generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id).sequences SCREAMING_SNAKE_CASE_: str = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = [ "Hello this is a long string of text.\n\nI'm trying to get the text of the", "Hey, I'm a little late to the party. I'm going to", ] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) @is_pt_flax_cross_test def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs SCREAMING_SNAKE_CASE_: int = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE_: Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE_: Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = pt_inputs["input_ids"].shape SCREAMING_SNAKE_CASE_: Optional[int] = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Dict = 0 SCREAMING_SNAKE_CASE_: Union[str, Any] = 1 SCREAMING_SNAKE_CASE_: Optional[int] = 0 SCREAMING_SNAKE_CASE_: Optional[int] = 1 SCREAMING_SNAKE_CASE_: Any = pt_model_class(lowerCAmelCase__).eval() SCREAMING_SNAKE_CASE_: Any = model_class(lowerCAmelCase__ , dtype=jnp.floataa) SCREAMING_SNAKE_CASE_: List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_model(**lowerCAmelCase__).to_tuple() SCREAMING_SNAKE_CASE_: List[Any] = fx_model(**lowerCAmelCase__).to_tuple() self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = model_class.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = fx_model_loaded(**lowerCAmelCase__).to_tuple() self.assertEqual( len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch") for fx_output_loaded, 
pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2) @is_pt_flax_cross_test def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs SCREAMING_SNAKE_CASE_: Any = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE_: int = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE_: Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_model_class(lowerCAmelCase__).eval() SCREAMING_SNAKE_CASE_: Dict = model_class(lowerCAmelCase__ , dtype=jnp.floataa) SCREAMING_SNAKE_CASE_: List[str] = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = pt_inputs["input_ids"].shape SCREAMING_SNAKE_CASE_: List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Optional[Any] = 0 SCREAMING_SNAKE_CASE_: List[str] = 1 SCREAMING_SNAKE_CASE_: Any = 0 SCREAMING_SNAKE_CASE_: List[str] = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE_: List[str] = pt_model(**lowerCAmelCase__).to_tuple() SCREAMING_SNAKE_CASE_: Union[str, Any] = fx_model(**lowerCAmelCase__).to_tuple() self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = pt_model_class.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__) with torch.no_grad(): SCREAMING_SNAKE_CASE_: Optional[Any] = pt_model_loaded(**lowerCAmelCase__).to_tuple() self.assertEqual( len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) @tooslow def _SCREAMING_SNAKE_CASE ( self : str): for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE_: Dict = model_class_name.from_pretrained("EleutherAI/gpt-j-6B") SCREAMING_SNAKE_CASE_: List[Any] = model(np.ones((1, 1))) self.assertIsNotNone(lowerCAmelCase__)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''roformer''' def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size if embedding_size is None else embedding_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = rotary_value A = use_cache class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : Dict ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Fast modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation (tetration)
    base ↑↑ height; with the defaults this matches Project Euler problem 188."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
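# Two small sanity checks for _modexpt (values chosen by us):
# 3**4 = 81 and 81 % 5 == 1; 2**10 = 1024 and 1024 % 1000 == 24.
assert _modexpt(3, 4, 5) == 1
assert _modexpt(2, 10, 1000) == 24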
"""simple docstring""" import argparse import struct import unittest class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : bytes ) -> None: A = data # Initialize hash values A = [ 0X6_A_0_9_E_6_6_7, 0XB_B_6_7_A_E_8_5, 0X3_C_6_E_F_3_7_2, 0XA_5_4_F_F_5_3_A, 0X5_1_0_E_5_2_7_F, 0X9_B_0_5_6_8_8_C, 0X1_F_8_3_D_9_A_B, 0X5_B_E_0_C_D_1_9, ] # Initialize round constants A = [ 0X4_2_8_A_2_F_9_8, 0X7_1_3_7_4_4_9_1, 0XB_5_C_0_F_B_C_F, 0XE_9_B_5_D_B_A_5, 0X3_9_5_6_C_2_5_B, 0X5_9_F_1_1_1_F_1, 0X9_2_3_F_8_2_A_4, 0XA_B_1_C_5_E_D_5, 0XD_8_0_7_A_A_9_8, 0X1_2_8_3_5_B_0_1, 0X2_4_3_1_8_5_B_E, 0X5_5_0_C_7_D_C_3, 0X7_2_B_E_5_D_7_4, 0X8_0_D_E_B_1_F_E, 0X9_B_D_C_0_6_A_7, 0XC_1_9_B_F_1_7_4, 0XE_4_9_B_6_9_C_1, 0XE_F_B_E_4_7_8_6, 0X0_F_C_1_9_D_C_6, 0X2_4_0_C_A_1_C_C, 0X2_D_E_9_2_C_6_F, 0X4_A_7_4_8_4_A_A, 0X5_C_B_0_A_9_D_C, 0X7_6_F_9_8_8_D_A, 0X9_8_3_E_5_1_5_2, 0XA_8_3_1_C_6_6_D, 0XB_0_0_3_2_7_C_8, 0XB_F_5_9_7_F_C_7, 0XC_6_E_0_0_B_F_3, 0XD_5_A_7_9_1_4_7, 0X0_6_C_A_6_3_5_1, 0X1_4_2_9_2_9_6_7, 0X2_7_B_7_0_A_8_5, 0X2_E_1_B_2_1_3_8, 0X4_D_2_C_6_D_F_C, 0X5_3_3_8_0_D_1_3, 0X6_5_0_A_7_3_5_4, 0X7_6_6_A_0_A_B_B, 0X8_1_C_2_C_9_2_E, 0X9_2_7_2_2_C_8_5, 0XA_2_B_F_E_8_A_1, 0XA_8_1_A_6_6_4_B, 0XC_2_4_B_8_B_7_0, 0XC_7_6_C_5_1_A_3, 0XD_1_9_2_E_8_1_9, 0XD_6_9_9_0_6_2_4, 0XF_4_0_E_3_5_8_5, 0X1_0_6_A_A_0_7_0, 0X1_9_A_4_C_1_1_6, 0X1_E_3_7_6_C_0_8, 0X2_7_4_8_7_7_4_C, 0X3_4_B_0_B_C_B_5, 0X3_9_1_C_0_C_B_3, 0X4_E_D_8_A_A_4_A, 0X5_B_9_C_C_A_4_F, 0X6_8_2_E_6_F_F_3, 0X7_4_8_F_8_2_E_E, 0X7_8_A_5_6_3_6_F, 0X8_4_C_8_7_8_1_4, 0X8_C_C_7_0_2_0_8, 0X9_0_B_E_F_F_F_A, 0XA_4_5_0_6_C_E_B, 0XB_E_F_9_A_3_F_7, 0XC_6_7_1_7_8_F_2, ] A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes: A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64)) A = struct.pack('>Q' ,(len(A_ ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None: # Convert into blocks of 64 bytes A = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A = list(struct.unpack('>16L' ,A_ ) ) # add 48 0-ed integers words += [0] * 48 A , A , A , A , A , A , A , A = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array A = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) A = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X1_0_0_0_0_0_0_0_0 # Compression A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 ) A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g) A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X1_0_0_0_0_0_0_0_0 A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 ) A = (a & b) ^ (a & c) ^ (b & c) A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0 A , A , A , A , A , A , A , A = ( g, f, e, ((d + tempa) % 0X1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0), ) A = [a, b, c, d, e, f, g, h] # Modify final values A = [ ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int: return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class lowerCAmelCase_ ( unittest.TestCase ): '''simple 
docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None: import hashlib A = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() ) def _snake_case ( ): import doctest doctest.testmod() A = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) A = parser.parse_args() A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: A = f.read() else: A = bytes(snake_case__ , 'utf-8' ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
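# A known-answer check for the SHA-256 implementation above (the class name
# was mangled in this dump; upstream it is SHA256). SHA-256 of b"abc" is the
# standard FIPS 180-2 test vector:
#
#   assert SHA256(b"abc").hash == (
#       "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
#   )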
"""simple docstring""" import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } lowerCAmelCase_ = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } lowerCAmelCase_ = '</w>' lowerCAmelCase_ = '@@ ' def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]: lowercase__ : Any = set() lowercase__ : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ : Any = char return pairs # Speech2Text2 has no max input length lowerCAmelCase_ = {'facebook/s2t-wav2vec2-large-en-de': 1_024} class __A ( A_ ): '''simple docstring''' lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES lowerCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : Optional[int] = ["input_ids", "attention_mask"] def __init__( self : int ,_snake_case : Union[str, Any] ,_snake_case : str="<s>" ,_snake_case : List[Any]="<pad>" ,_snake_case : int="</s>" ,_snake_case : str="<unk>" ,_snake_case : str=False ,_snake_case : List[Any]=None ,**_snake_case : Tuple ,) -> int: """simple docstring""" super().__init__( unk_token=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,pad_token=_snake_case ,do_lower_case=_snake_case ,**_snake_case ,) lowercase__ : Optional[int] = do_lower_case with open(_snake_case ,encoding='''utf-8''' ) as vocab_handle: lowercase__ : List[str] = json.load(_snake_case ) lowercase__ : Tuple = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f"""No merges files provided. 
{self.__class__.__name__} can only be used for decoding.""" ) lowercase__ : Union[str, Any] = None lowercase__ : int = None else: with open(_snake_case ,encoding='''utf-8''' ) as merges_handle: lowercase__ : Tuple = merges_handle.read().split('''\n''' )[:-1] lowercase__ : Optional[int] = [tuple(merge.split()[:2] ) for merge in merges] lowercase__ : Tuple = dict(zip(_snake_case ,range(len(_snake_case ) ) ) ) lowercase__ : Any = {} @property def UpperCAmelCase ( self : Optional[int] ) -> int: """simple docstring""" return len(self.decoder ) def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowercase__ : Union[str, Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] lowercase__ : List[str] = get_pairs(_snake_case ) if not pairs: return token while True: lowercase__ : List[str] = min(_snake_case ,key=lambda _snake_case : self.bpe_ranks.get(_snake_case ,float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowercase__ , lowercase__ : Tuple = bigram lowercase__ : int = [] lowercase__ : int = 0 while i < len(_snake_case ): try: lowercase__ : Any = word.index(_snake_case ,_snake_case ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase__ : Tuple = j if word[i] == first and i < len(_snake_case ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase__ : List[Any] = tuple(_snake_case ) lowercase__ : Dict = new_word if len(_snake_case ) == 1: break else: lowercase__ : List[Any] = get_pairs(_snake_case ) lowercase__ : Optional[Any] = ''' '''.join(_snake_case ) if word == "\n " + BPE_TOKEN_MERGES: lowercase__ : Union[str, Any] = '''\n''' + BPE_TOKEN_MERGES if word.endswith(_snake_case ): lowercase__ : Optional[int] = word.replace(_snake_case ,'''''' ) lowercase__ : Union[str, Any] = word.replace(''' ''' ,_snake_case ) lowercase__ : Optional[Any] = word return word def UpperCAmelCase ( self : Optional[int] ,_snake_case : Dict ) -> Optional[int]: """simple docstring""" if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: lowercase__ : Tuple = text.lower() lowercase__ : Optional[Any] = text.split() lowercase__ : Dict = [] for token in text: if token: split_tokens.extend(list(self.bpe(_snake_case ).split(''' ''' ) ) ) return split_tokens def UpperCAmelCase ( self : Optional[Any] ,_snake_case : str ) -> int: """simple docstring""" return self.encoder.get(_snake_case ,self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self : Dict ,_snake_case : int ) -> str: """simple docstring""" lowercase__ : List[Any] = self.decoder.get(_snake_case ,self.unk_token ) return result def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ) -> str: """simple docstring""" lowercase__ : Optional[int] = ''' '''.join(_snake_case ) # make sure @@ tokens are concatenated lowercase__ : Tuple = ''''''.join(string.split(_snake_case ) ) return string def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_snake_case ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : str = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ : int = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_snake_case ,ensure_ascii=_snake_case ) + '''\n''' ) lowercase__ : List[str] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) lowercase__ : Tuple = token_index writer.write(''' '''.join(_snake_case ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''DeiTFeatureExtractor'''] _lowercase = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import inspect import unittest class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Tuple ): try: import diffusers # noqa: F401 except ImportError: assert False def _lowercase ( self : Optional[Any] ): import diffusers from diffusers.dependency_versions_table import deps __lowercase = inspect.getmembers(UpperCAmelCase__, inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": __lowercase = "k-diffusion" elif backend == "invisible_watermark": __lowercase = "invisible-watermark" assert backend in deps, F"""{backend} is not in the deps table!"""
"""simple docstring""" from __future__ import annotations import requests def _snake_case ( snake_case__ : str ): A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(snake_case__ ).json() def _snake_case ( snake_case__ : int = 10 ): A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' A = requests.get(snake_case__ ).json()[:max_stories] return [get_hackernews_story(snake_case__ ) for story_id in story_ids] def _snake_case ( snake_case__ : int = 10 ): A = hackernews_top_stories(snake_case__ ) return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place by swapping two random positions per pass.

    Note: this is the naive random-transposition variant; the classical
    Fisher-Yates walks the array once, swapping each position with a
    random index from the unshuffled remainder.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[b], data[a] = data[a], data[b]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
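# A reproducibility note (our own addition): seeding `random` makes the
# shuffle deterministic, which helps when a stable order is needed in tests.
random.seed(42)
shuffled = fisher_yates_shuffle(list(range(10)))
assert sorted(shuffled) == list(range(10))  # same elements, permuted order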
"""simple docstring""" from string import ascii_uppercase _lowercase = {char: i for i, char in enumerate(ascii_uppercase)} _lowercase = dict(enumerate(ascii_uppercase)) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = len(snake_case__ ) A = 0 while True: if x == i: A = 0 if len(snake_case__ ) == len(snake_case__ ): break key += key[i] i += 1 return key def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in message: if letter == " ": cipher_text += " " else: A = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: A = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _snake_case ( ): A = 'THE GERMAN ATTACK' A = 'SECRET' A = generate_key(snake_case__ , snake_case__ ) A = cipher_text(snake_case__ , snake_case__ ) print(F'Encrypted Text = {s}' ) print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def SCREAMING_SNAKE_CASE_( self ) -> int: lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = BlipImageProcessor() lowerCamelCase_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) lowerCamelCase_ = BlipaProcessor(lowercase , lowercase ) processor.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Union[str, Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> str: return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase_ = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: lowerCamelCase_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCamelCase_ = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 ) lowerCamelCase_ = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase ) lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = image_processor(lowercase , return_tensors="np" ) lowerCamelCase_ = processor(images=lowercase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase ) lowerCamelCase_ = "lower newer" lowerCamelCase_ = processor(text=lowercase ) lowerCamelCase_ = tokenizer(lowercase , return_token_type_ids=lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase ) lowerCamelCase_ = "lower newer" lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = processor(text=lowercase , images=lowercase ) 
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(lowercase ): processor() def SCREAMING_SNAKE_CASE_( self ) -> Tuple: lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase ) lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase_ = processor.batch_decode(lowercase ) lowerCamelCase_ = tokenizer.batch_decode(lowercase ) self.assertListEqual(lowercase , lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase ) lowerCamelCase_ = "lower newer" lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = processor(text=lowercase , images=lowercase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): A = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = 'sgugger/tiny-distilbert-classification' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = 'sshleifer/tiny-gpt2' A = AutoConfig.from_pretrained(A_ ) # set architectures equal to `None` A = None A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ,configs=[config] ) A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,) A = PyTorchBenchmark(A_ ) A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = 'sshleifer/tiny-gpt2' A = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=A_ 
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
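
    # The remaining tests exercise the benchmark's file outputs: CSV reports
    # and a line-by-line memory trace, both written to a temporary directory.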
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
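

# Convenience entry point (an addition, not in the original module): lets the
# suite run directly via `python <path to this file>` as well as through
# pytest/unittest discovery.
if __name__ == "__main__":
    unittest.main()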