Dataset schema (one row = five columns):

  column                   type    value range
  code                     string  length 86 - 54.5k
  code_codestyle           int64   0 - 371
  style_context            string  length 87 - 49.2k
  style_context_codestyle  int64   0 - 349
  label                    int64   0 - 1
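The rows below follow this schema. As a minimal sketch of how rows with this schema could be inspected with the Hugging Face `datasets` library; the repo id is a placeholder (the dump does not name the dataset), and reading `label` as a style-match flag is an assumption:

from datasets import load_dataset

# Placeholder repo id: the dump above does not name the dataset.
ds = load_dataset("username/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]))                # string length, 86 to 54.5k characters
print(row["code_codestyle"])           # integer style id, 0 to 371
print(len(row["style_context"]))       # string length, 87 to 49.2k characters
print(row["style_context_codestyle"])  # integer style id, 0 to 349
print(row["label"])                    # 0 or 1 (assumed: whether the two samples share a style)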
code:

import os

# Precompute a list of the first 100 triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count how many words in words.txt have a triangular word value."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    # A word's value is the sum of its letters' positions in the alphabet (A = 1)
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
code_codestyle: 319
style_context:

import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
style_context_codestyle: 319
label: 1
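Every complete row in this dump carries code_codestyle 319, style_context_codestyle 319, and label 1, which suggests (an assumption; nothing in the dump confirms it) that the label flags whether the two snippets share a style id. A minimal sketch under that assumption:

def row_is_style_match(row: dict) -> bool:
    # Assumed label semantics: 1 when code and style_context share a style id.
    return row["code_codestyle"] == row["style_context_codestyle"]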
code:

from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
code_codestyle: 319
style_context:

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 319
label: 1
code:

import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
code_codestyle: 319
style_context:

def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubble the largest element to the end, then recurse."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 319
label: 1
code:

import os
import time

import numpy as np
import onnxruntime as ort


# TensorRT execution-provider flags. The environment-variable names below are
# assumptions: the dump dropped the assignment targets and only kept the values.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
code_codestyle: 319
style_context:

import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """A list of [`FlaxLogitsProcessor`] and [`FlaxLogitsWarper`] applied in sequence."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that restricts to the smallest token set with probability mass >= top_p."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that keeps only the top_k highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces the specified token when max_length is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by masking EOS below min_length."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that suppresses a list of tokens as soon as generation starts."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that suppresses a list of tokens at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] implementing Whisper's timestamp-token decoding rules."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
style_context_codestyle: 319
label: 1
code:

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bi-directional Dijkstra: search from both ends and meet in the middle."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 319
style_context:

# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


# The env-var name is an assumption; the dump only preserved the value "3",
# which matches the usual flag for silencing TensorFlow logging.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
style_context_codestyle: 319
label: 1
code:

from ..utils import DummyObject, requires_backends


# Class name is an inference: in transformers, the keras_nlp backend's only
# dummy object is the TF GPT-2 tokenizer.
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
code_codestyle: 319
style_context:

# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
style_context_codestyle: 319
label: 1
code:

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    """Configuration class for a Whisper model; the defaults describe a small architecture
    similar to openai/whisper-tiny."""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
code_codestyle: 319
style_context:

import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    # Build the mapping from TF variable names to HF parameter names
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF weights into the HF EfficientNet structure."""
    # Load the original Keras model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
style_context_codestyle: 319
label: 1
code:

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        expected_bert_mapping = {"BertModelTest": "BertModelTester"}

        expected_blip_mapping = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        expected_bert_mapping = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        expected_blip_mapping = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        expected_bert_mapping = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        expected_blip_mapping = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
code_codestyle: 319
style_context:

def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives via binary search, narrowing the bound row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every element."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives by scanning each row and breaking at the first negative."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
319
1
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Union[str, Any]=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Tuple=None , ): SCREAMING_SNAKE_CASE : Union[str, Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Any = use_labels SCREAMING_SNAKE_CASE : Tuple = hidden_size SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Dict = num_attention_heads SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Any = type_sequence_label_size SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Any = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE : Dict = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE : Union[str, Any] = num_patches + 1 def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : str = None if self.use_labels: SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Tuple = self.get_config() return config, pixel_values, labels def _A ( self : Union[str, Any] ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) def _A ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE : List[str] = TFViTModel(config=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , training=UpperCAmelCase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. SCREAMING_SNAKE_CASE : Any = self.image_size // 2 SCREAMING_SNAKE_CASE : Optional[Any] = pixel_values[:, :, :image_size, :image_size] SCREAMING_SNAKE_CASE : int = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_ , training=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _A ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Optional[Any] = self.type_sequence_label_size SCREAMING_SNAKE_CASE : List[str] = TFViTForImageClassification(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ , training=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. SCREAMING_SNAKE_CASE : str = self.image_size // 2 SCREAMING_SNAKE_CASE : Union[str, Any] = pixel_values[:, :, :image_size, :image_size] SCREAMING_SNAKE_CASE : int = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_ , training=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : Tuple = TFViTForImageClassification(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : List[Any] = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A ( self : str ): SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs SCREAMING_SNAKE_CASE : int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : int = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCamelCase_ : List[str] = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCamelCase_ : Union[str, Any] = False UpperCamelCase_ : Any = False UpperCamelCase_ : List[Any] = False def _A ( self : Any ): SCREAMING_SNAKE_CASE : Any = TFViTModelTester(self ) SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 ) def _A ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def _A ( self : Optional[int] ): pass @unittest.skip(reason="ViT does not use inputs_embeds" ) def _A ( self : int ): pass def _A ( self : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings() 
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , tf.keras.layers.Layer ) ) def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Optional[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def _A ( self : Dict ): SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ ) @slow def _A ( self : Dict ): SCREAMING_SNAKE_CASE : List[Any] = TFViTModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(UpperCAmelCase_ ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _A ( self : Optional[int] ): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Optional[int] = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ) SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE : int = prepare_img() SCREAMING_SNAKE_CASE : int = image_processor(images=UpperCAmelCase_ , return_tensors="tf" ) # forward pass SCREAMING_SNAKE_CASE : Optional[int] = model(**UpperCAmelCase_ ) # verify the logits SCREAMING_SNAKE_CASE : int = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant([-0.2_744, 0.8_215, -0.0_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 )
319
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
319
1
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowerCamelCase__ ( lowercase ): """simple docstring""" return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE : List[Any] = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" ) SCREAMING_SNAKE_CASE : Any = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" ) SCREAMING_SNAKE_CASE : str = key.replace("heads.cmd.itm_head.cls" , "itm_head" ) SCREAMING_SNAKE_CASE : int = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" ) SCREAMING_SNAKE_CASE : List[str] = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" ) SCREAMING_SNAKE_CASE : int = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" ) SCREAMING_SNAKE_CASE : Tuple = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" ) SCREAMING_SNAKE_CASE : Optional[int] = key.replace("mm_text_projection" , "flava.text_to_mm_projection" ) SCREAMING_SNAKE_CASE : List[str] = key.replace("mm_image_projection" , "flava.image_to_mm_projection" ) SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace("image_encoder.module" , "flava.image_model" ) SCREAMING_SNAKE_CASE : Optional[int] = key.replace("text_encoder.module" , "flava.text_model" ) SCREAMING_SNAKE_CASE : Optional[Any] = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" ) SCREAMING_SNAKE_CASE : Dict = key.replace("mm_encoder.module" , "flava.multimodal_model" ) SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace("text_projection" , "flava.text_projection" ) SCREAMING_SNAKE_CASE : Tuple = key.replace("image_projection" , "flava.image_projection" ) SCREAMING_SNAKE_CASE : int = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE : str = value return upgrade @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=None ): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : Optional[Any] = FlavaConfig.from_pretrained(lowercase ) else: SCREAMING_SNAKE_CASE : str = FlavaConfig() SCREAMING_SNAKE_CASE : List[str] = FlavaForPreTraining(lowercase ).eval() SCREAMING_SNAKE_CASE : List[Any] = convert_dalle_checkpoint(lowercase , lowercase , save_checkpoint=lowercase ) if os.path.exists(lowercase ): SCREAMING_SNAKE_CASE : Optional[int] = torch.load(lowercase , map_location="cpu" ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" ) SCREAMING_SNAKE_CASE : Dict = upgrade_state_dict(lowercase , lowercase ) hf_model.load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = hf_model.state_dict() SCREAMING_SNAKE_CASE : List[Any] = count_parameters(lowercase ) SCREAMING_SNAKE_CASE : Tuple = count_parameters(lowercase ) + count_parameters(lowercase ) assert torch.allclose(lowercase , lowercase , atol=1E-3 ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") 
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") snake_case = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
319
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
1
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = GPTSanJapaneseTokenizer UpperCamelCase_ : Any = False UpperCamelCase_ : Union[str, Any] = {'''do_clean_text''': False, '''add_prefix_space''': False} def _A ( self : List[str] ): super().setUp() # fmt: off SCREAMING_SNAKE_CASE : str = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"] # fmt: on SCREAMING_SNAKE_CASE : List[str] = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀 SCREAMING_SNAKE_CASE : Any = {"unk_token": "<unk>"} SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.emoji_file , "w" ) as emoji_writer: emoji_writer.write(json.dumps(UpperCAmelCase_ ) ) def _A ( self : Optional[int] , **UpperCAmelCase_ : List[str] ): kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : Dict = "こんにちは、世界。 \nこんばんは、㔺界。😀" SCREAMING_SNAKE_CASE : Optional[int] = "こんにちは、世界。 \nこんばんは、世界。😀" return input_text, output_text def _A ( self : Optional[Any] , UpperCAmelCase_ : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.get_input_output_texts(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def _A ( self : List[Any] ): pass # TODO add if relevant def _A ( self : Tuple ): pass # TODO add if relevant def _A ( self : Union[str, Any] ): pass # TODO add if relevant def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() # Testing tokenization SCREAMING_SNAKE_CASE : Dict = "こんにちは、世界。 こんばんは、㔺界。" SCREAMING_SNAKE_CASE : int = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"] SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Testing conversion to ids without special tokens SCREAMING_SNAKE_CASE : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE : Union[str, Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : Any = self.get_tokenizer() # Testing 
tokenization SCREAMING_SNAKE_CASE : str = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。" SCREAMING_SNAKE_CASE : int = "こんにちは、、、、世界。こんばんは、、、、世界。" SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" ) # Testing tokenization SCREAMING_SNAKE_CASE : List[Any] = "こんにちは、世界。" SCREAMING_SNAKE_CASE : List[str] = "こんばんは、㔺界。😀" SCREAMING_SNAKE_CASE : Dict = "こんにちは、世界。こんばんは、世界。😀" SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(prefix_text + input_text ) SCREAMING_SNAKE_CASE : str = tokenizer.encode("" , prefix_text=prefix_text + input_text ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(UpperCAmelCase_ , prefix_text=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer.decode(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : str ): SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" ) # Testing tokenization SCREAMING_SNAKE_CASE : Optional[int] = "こんにちは、世界。" SCREAMING_SNAKE_CASE : List[Any] = "こんばんは、㔺界。😀" SCREAMING_SNAKE_CASE : Any = len(tokenizer.encode(UpperCAmelCase_ ) ) - 2 SCREAMING_SNAKE_CASE : List[Any] = len(tokenizer.encode(UpperCAmelCase_ ) ) - 2 SCREAMING_SNAKE_CASE : List[Any] = [1] + [0] * (len_prefix + len_text + 1) SCREAMING_SNAKE_CASE : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0] SCREAMING_SNAKE_CASE : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(prefix_text + input_text ).token_type_ids SCREAMING_SNAKE_CASE : Optional[int] = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(UpperCAmelCase_ , prefix_text=UpperCAmelCase_ ).token_type_ids self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Dict ): SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("あンいワ" ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode("" , prefix_text="あンいワ" ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode("いワ" , prefix_text="あン" ) self.assertEqual(tokenizer.decode(UpperCAmelCase_ ) , tokenizer.decode(UpperCAmelCase_ ) ) self.assertEqual(tokenizer.decode(UpperCAmelCase_ ) , tokenizer.decode(UpperCAmelCase_ ) ) self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" ) SCREAMING_SNAKE_CASE : str = [["武田信玄", "は、"], ["織田信長", "の配下の、"]] SCREAMING_SNAKE_CASE : List[Any] = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = tokenizer.batch_encode_plus(UpperCAmelCase_ , padding=UpperCAmelCase_ ) # 
fmt: off SCREAMING_SNAKE_CASE : Optional[Any] = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]] SCREAMING_SNAKE_CASE : Dict = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] SCREAMING_SNAKE_CASE : str = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , UpperCAmelCase_ ) self.assertListEqual(x_token.token_type_ids , UpperCAmelCase_ ) self.assertListEqual(x_token.attention_mask , UpperCAmelCase_ ) self.assertListEqual(x_token_a.input_ids , UpperCAmelCase_ ) self.assertListEqual(x_token_a.token_type_ids , UpperCAmelCase_ ) self.assertListEqual(x_token_a.attention_mask , UpperCAmelCase_ ) def _A ( self : Any ): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def _A ( self : List[str] ): # tokenizer has no padding token pass
319
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate the logical OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function against its full truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
319
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
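# A minimal sketch of what the lazy pattern above buys: importing the package is
# cheap, and the torch-backed module is only executed when one of its attributes
# is first accessed. The top-level import path below is an assumption about how
# this subpackage is exposed.
from transformers.models.swiftformer import SwiftFormerConfig  # triggers the lazy load

config = SwiftFormerConfig()  # a plain config object; no model weights are touched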
319
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using the union-by-rank heuristic; return True if merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
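# A minimal usage sketch for the class above (the values are illustrative):
# four singleton sets, merged pairwise and then together.
ds = DisjointSet([1, 1, 1, 1])
assert ds.merge(0, 1)      # {0, 1} plus singletons {2}, {3}
assert ds.merge(2, 3)      # {0, 1} and {2, 3}
assert not ds.merge(0, 1)  # 0 and 1 already share a root
ds.merge(1, 2)             # everything ends up in one set
assert ds.max_set == 4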
319
1
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache" SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Optional[int] = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path elif issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ): """simple docstring""" assert isinstance(lowercase , lowercase ) for split in splits: SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split] 
assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "cache" SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : str = ParquetDatasetReader( {"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Tuple = "train" SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path} SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) SCREAMING_SNAKE_CASE : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} ) SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert get_writer_batch_size(lowercase ) == expected
319
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration class for a backbone wrapped from the timm library."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
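# A minimal usage sketch, assuming "resnet18" as an example timm backbone name;
# it only shows which fields the config stores and their defaults.
config = TimmBackboneConfig(backbone="resnet18")
assert config.num_channels == 3
assert config.features_only is True
assert config.use_pretrained_backbone is True
assert config.out_indices == (-1,)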
319
1
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration step the requested number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each edge by four edges, bulging outwards at 60 degrees."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
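# A quick worked check of rotate() above: turning the unit x-vector by 90 degrees
# should land on the unit y-vector, which makes the rotation-matrix convention concrete.
assert numpy.allclose(rotate(numpy.array([1, 0]), 90), [0, 1])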
319
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum every amicable number below the limit."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
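# A worked check of the definitions above: 220 and 284 form the classic amicable
# pair, so sum_of_divisors maps each onto the other; a perfect number such as 6
# maps onto itself, which is why solution() also requires sum_of_divisors(i) != i.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
assert sum_of_divisors(6) == 6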
319
1
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
319
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
1
from manim import * class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def _A ( self : str ): SCREAMING_SNAKE_CASE : int = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.25 , width=0.25 ) SCREAMING_SNAKE_CASE : Tuple = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Dict = Text("CPU" , font_size=24 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : int = Text("GPU" , font_size=24 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ ) gpu.move_to([-1, -1, 0] ) self.add(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Tuple = Text("Model" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ ) model.move_to([3, -1.0, 0] ) self.add(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : str = [] for i, rect in enumerate(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 ) target.move_to(UpperCAmelCase_ ) model_arr.append(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(UpperCAmelCase_ ) self.add(*UpperCAmelCase_ , *UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : str = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[str] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[str] = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[str] = Text("Disk" , font_size=24 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ ) disk.move_to([-4, -1.25, 0] ) self.add(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE : Any = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 
, ) blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = MarkupText( f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Dict = Square(0.3 ) input.set_fill(UpperCAmelCase_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 ) self.play(Write(UpperCAmelCase_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 ) self.play(MoveToTarget(UpperCAmelCase_ ) ) self.play(FadeOut(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) SCREAMING_SNAKE_CASE : List[str] = MarkupText( f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCAmelCase_ , run_time=3 ) ) SCREAMING_SNAKE_CASE : int = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02} self.play( Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) SCREAMING_SNAKE_CASE : Any = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) SCREAMING_SNAKE_CASE : int = AnimationGroup( FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(UpperCAmelCase_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: SCREAMING_SNAKE_CASE : List[str] = 0.7 self.play( Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) SCREAMING_SNAKE_CASE : Tuple = a_c SCREAMING_SNAKE_CASE : Any = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , 
buff=0.5 ) self.play( FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) ) self.wait()
319
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
319
1
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean snake_case = 0 snake_case = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right snake_case = tuple[int, int] class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Node | None , ): SCREAMING_SNAKE_CASE : Optional[int] = pos_x SCREAMING_SNAKE_CASE : str = pos_y SCREAMING_SNAKE_CASE : Optional[Any] = (pos_y, pos_x) SCREAMING_SNAKE_CASE : Dict = goal_x SCREAMING_SNAKE_CASE : List[str] = goal_y SCREAMING_SNAKE_CASE : Optional[Any] = g_cost SCREAMING_SNAKE_CASE : str = parent SCREAMING_SNAKE_CASE : Dict = self.calculate_heuristic() SCREAMING_SNAKE_CASE : Dict = self.g_cost + self.h_cost def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = self.pos_x - self.goal_x SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(UpperCAmelCase_ ) + abs(UpperCAmelCase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : Optional[int] , UpperCAmelCase_ : Node ): return self.f_cost < other.f_cost class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : TPosition , UpperCAmelCase_ : TPosition ): SCREAMING_SNAKE_CASE : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = [self.start] SCREAMING_SNAKE_CASE : list[Node] = [] SCREAMING_SNAKE_CASE : int = False def _A ( self : Tuple ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE : str = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(UpperCAmelCase_ ) self.closed_nodes.append(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.get_successors(UpperCAmelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(UpperCAmelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : Dict = self.open_nodes.pop(self.open_nodes.index(UpperCAmelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(UpperCAmelCase_ ) else: self.open_nodes.append(UpperCAmelCase_ ) return [self.start.pos] def _A ( self : List[Any] , UpperCAmelCase_ : Node ): SCREAMING_SNAKE_CASE : Dict = [] for action in delta: SCREAMING_SNAKE_CASE : int = parent.pos_x + action[1] SCREAMING_SNAKE_CASE : Optional[Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( UpperCAmelCase_ , UpperCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCAmelCase_ , ) ) return successors def _A ( self : str , UpperCAmelCase_ : Node | None ): SCREAMING_SNAKE_CASE : Dict = node SCREAMING_SNAKE_CASE : Optional[Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE : str = current_node.parent path.reverse() return path class SCREAMING_SNAKE_CASE : '''simple 
docstring''' def __init__( self : Tuple , UpperCAmelCase_ : TPosition , UpperCAmelCase_ : TPosition ): SCREAMING_SNAKE_CASE : Union[str, Any] = AStar(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = AStar(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = False def _A ( self : Union[str, Any] ): while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 ) SCREAMING_SNAKE_CASE : str = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( UpperCAmelCase_ , UpperCAmelCase_ ) self.fwd_astar.closed_nodes.append(UpperCAmelCase_ ) self.bwd_astar.closed_nodes.append(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = current_bwd_node SCREAMING_SNAKE_CASE : Optional[int] = current_fwd_node SCREAMING_SNAKE_CASE : Optional[int] = { self.fwd_astar: self.fwd_astar.get_successors(UpperCAmelCase_ ), self.bwd_astar: self.bwd_astar.get_successors(UpperCAmelCase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(UpperCAmelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : Dict = astar.open_nodes.pop( astar.open_nodes.index(UpperCAmelCase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(UpperCAmelCase_ ) else: astar.open_nodes.append(UpperCAmelCase_ ) return [self.fwd_astar.start.pos] def _A ( self : Optional[Any] , UpperCAmelCase_ : Node , UpperCAmelCase_ : Node ): SCREAMING_SNAKE_CASE : List[Any] = self.fwd_astar.retrace_path(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.bwd_astar.retrace_path(UpperCAmelCase_ ) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE : Dict = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] snake_case = (0, 0) snake_case = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) snake_case = time.time() snake_case = AStar(init, goal) snake_case = a_star.search() snake_case = time.time() - start_time print(F"""AStar execution time = {end_time:f} seconds""") snake_case = time.time() snake_case = BidirectionalAStar(init, goal) snake_case = time.time() - bd_start_time print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
319
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


snake_case = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case = ["""SpeechEncoderDecoderModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case = ["""FlaxSpeechEncoderDecoderModel"""]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
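A quick sanity check of the row above, assuming the three anonymized classes keep their upstream names (`Node`, `AStar`, `BidirectionalAStar`) — a minimal sketch, not part of the dataset row:

# Hypothetical usage; `AStar` / `BidirectionalAStar` stand in for the
# anonymized SCREAMING_SNAKE_CASE classes above.
start, goal = (0, 0), (6, 6)  # (y, x) coordinates, matching the module's convention

forward_only = AStar(start, goal)
path = forward_only.search()            # list of (y, x) tuples from start to goal
assert path[0] == start and path[-1] == goal

both_ends = BidirectionalAStar(start, goal)
assert both_ends.search()[0] == start   # searches meet in the middle, same endpoints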
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
319
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case = 16 snake_case = 32 def lowerCamelCase__ ( lowercase , lowercase = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE : List[Any] = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE : str = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE : Optional[Any] = 8 else: SCREAMING_SNAKE_CASE : Union[str, Any] = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case = mocked_dataloaders # noqa: F811 def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1": SCREAMING_SNAKE_CASE : int = 2 # New Code # SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : Any = config["lr"] SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(lowercase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase ): SCREAMING_SNAKE_CASE : Any = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = output.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) SCREAMING_SNAKE_CASE : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
319
1
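The comments in the row above describe Accelerate's `accumulate` context manager; the core loop pattern, reduced to a minimal sketch (names like `model`, `optimizer`, `dataloader` are assumed to already exist — this is not a complete script):

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):  # gradients only sync/step every 4th batch
        loss = model(**batch).loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()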
import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : int ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden SCREAMING_SNAKE_CASE : Optional[Any] = deepcopy(UpperCAmelCase_ ) elif os.path.exists(UpperCAmelCase_ ): with io.open(UpperCAmelCase_ , "r" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(UpperCAmelCase_ ) else: try: SCREAMING_SNAKE_CASE : List[str] = baseaa.urlsafe_baadecode(UpperCAmelCase_ ).decode("utf-8" ) SCREAMING_SNAKE_CASE : str = json.loads(UpperCAmelCase_ ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = config self.set_stage_and_offload() def _A ( self : int ): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. SCREAMING_SNAKE_CASE : int = self.get_value("zero_optimization.stage" , -1 ) # offload SCREAMING_SNAKE_CASE : List[str] = False if self.is_zeroa() or self.is_zeroa(): SCREAMING_SNAKE_CASE : str = set(["cpu", "nvme"] ) SCREAMING_SNAKE_CASE : Any = set( [ self.get_value("zero_optimization.offload_optimizer.device" ), self.get_value("zero_optimization.offload_param.device" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: SCREAMING_SNAKE_CASE : Optional[int] = True def _A ( self : List[str] , UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE : Optional[int] = self.config # find the config node of interest if it exists SCREAMING_SNAKE_CASE : Optional[Any] = ds_key_long.split("." ) SCREAMING_SNAKE_CASE : str = nodes.pop() for node in nodes: SCREAMING_SNAKE_CASE : str = config.get(UpperCAmelCase_ ) if config is None: return None, ds_key return config, ds_key def _A ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : int=None ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.find_config_node(UpperCAmelCase_ ) if config is None: return default return config.get(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=False ): SCREAMING_SNAKE_CASE : Tuple = self.config # find the config node of interest if it exists SCREAMING_SNAKE_CASE : List[Any] = ds_key_long.split("." 
) for node in nodes: SCREAMING_SNAKE_CASE : Tuple = config SCREAMING_SNAKE_CASE : List[str] = config.get(UpperCAmelCase_ ) if config is None: if must_exist: raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' ) else: return # if found remove it if parent_config is not None: parent_config.pop(UpperCAmelCase_ ) def _A ( self : List[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_value(UpperCAmelCase_ ) return False if value is None else bool(UpperCAmelCase_ ) def _A ( self : List[Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : Optional[int] = self.get_value(UpperCAmelCase_ ) return False if value is None else not bool(UpperCAmelCase_ ) def _A ( self : Optional[Any] ): return self._stage == 2 def _A ( self : str ): return self._stage == 3 def _A ( self : Optional[int] ): return self._offload class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : List[Any] ): SCREAMING_SNAKE_CASE : Optional[Any] = engine def _A ( self : Dict , UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ): # runs backpropagation and handles mixed precision self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_ ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple ): super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = hasattr(self.optimizer , "overflow" ) def _A ( self : Optional[Any] , UpperCAmelCase_ : int=None ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def _A ( self : Any ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def _A ( self : List[str] ): if self.__has_overflow__: return self.optimizer.overflow return False class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ): super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : str ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int=0.001 , UpperCAmelCase_ : Dict=0 , **UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE : Union[str, Any] = params SCREAMING_SNAKE_CASE : Optional[Any] = lr SCREAMING_SNAKE_CASE : List[Any] = weight_decay SCREAMING_SNAKE_CASE : int = kwargs class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=0 , **UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE : Tuple = optimizer SCREAMING_SNAKE_CASE : List[Any] = total_num_steps SCREAMING_SNAKE_CASE : Dict = warmup_num_steps SCREAMING_SNAKE_CASE : Any = kwargs
319
import functools


def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    if not isinstance(lowercase , lowercase ) or not all(isinstance(lowercase , lowercase ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(lowercase ) != 3 or not all(isinstance(lowercase , lowercase ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(lowercase ) == 0:
        return 0
    if min(lowercase ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(lowercase ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    SCREAMING_SNAKE_CASE : Dict = set(lowercase )

    @functools.cache
    def dynamic_programming(lowercase ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) ,
            costs[1] + dynamic_programming(index + 7 ) ,
            costs[2] + dynamic_programming(index + 30 ) ,
        )

    return dynamic_programming(1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
1
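The recursion in the second file of this row is the classic minimum-cost-for-travel-days DP (1-day, 7-day and 30-day passes). A worked call, assuming the anonymized function keeps its upstream name `mincost_tickets`:

# Passes cost 2 (1-day), 7 (7-day), 25 (30-day). For travel days 1, 4, 6, 7, 8, 20:
# a 7-day pass covers days 4-8 (cost 7), plus 1-day passes for days 1 and 20
# (2 + 2), so the minimum total is 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 25]) == 11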
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = None , ): """simple docstring""" if config_name_or_path is None: SCREAMING_SNAKE_CASE : int = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE : List[Any] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE : Optional[Any] = question_encoder_name_or_path SCREAMING_SNAKE_CASE : Dict = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. SCREAMING_SNAKE_CASE : Optional[int] = RagConfig.from_pretrained(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(lowercase ) SCREAMING_SNAKE_CASE : List[str] = gen_config SCREAMING_SNAKE_CASE : List[Any] = question_encoder_config SCREAMING_SNAKE_CASE : Optional[int] = model_class.from_pretrained_question_encoder_generator( lowercase , lowercase , config=lowercase ) rag_model.save_pretrained(lowercase ) # Sanity check. model_class.from_pretrained(lowercase ) # Save tokenizers. SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained(lowercase ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(lowercase ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) snake_case = parser.parse_args() snake_case = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
319
def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    # n ** (1 / 3) is a float, so round to the nearest integer before cubing;
    # otherwise perfect_cube(27) is False due to floating-point error.
    SCREAMING_SNAKE_CASE : Dict = round(n ** (1 / 3) )
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
319
1
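Why the `round()` in the cube check above matters — a float cube root is rarely exact:

>>> (27 ** (1 / 3)) ** 3 == 27
False
>>> round(27 ** (1 / 3)) ** 3 == 27
True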
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

snake_case = TypeVar("""KEY""")
snake_case = TypeVar("""VAL""")


@dataclass(frozen=lowerCAmelCase , slots=lowerCAmelCase )
class SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
    '''simple docstring'''

    UpperCamelCase_ : KEY
    UpperCamelCase_ : VAL


class SCREAMING_SNAKE_CASE ( _Item ):
    '''simple docstring'''

    def __init__( self : Optional[int] ):
        super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )

    def __bool__( self : List[str] ):
        return False


snake_case = _DeletedItem()


class SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
    '''simple docstring'''

    def __init__( self : List[str] , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : float = 0.75 ):
        SCREAMING_SNAKE_CASE : Optional[Any] = initial_block_size
        SCREAMING_SNAKE_CASE : list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        SCREAMING_SNAKE_CASE : str = capacity_factor
        SCREAMING_SNAKE_CASE : Optional[Any] = 0

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : KEY ):
        return hash(UpperCAmelCase_ ) % len(self._buckets )

    def _A ( self : Optional[Any] , UpperCAmelCase_ : int ):
        return (ind + 1) % len(self._buckets )

    def _A ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
        SCREAMING_SNAKE_CASE : Optional[int] = self._buckets[ind]
        if not stored:
            SCREAMING_SNAKE_CASE : Tuple = _Item(UpperCAmelCase_ , UpperCAmelCase_ )
            self._len += 1
            return True
        elif stored.key == key:
            SCREAMING_SNAKE_CASE : Tuple = _Item(UpperCAmelCase_ , UpperCAmelCase_ )
            return True
        else:
            return False

    def _A ( self : Optional[Any] ):
        SCREAMING_SNAKE_CASE : Any = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(UpperCAmelCase_ )

    def _A ( self : Any ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        SCREAMING_SNAKE_CASE : Union[str, Any] = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : Optional[Any] = self._buckets
        SCREAMING_SNAKE_CASE : List[Any] = [None] * new_size
        SCREAMING_SNAKE_CASE : str = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _A ( self : Any ):
        self._resize(len(self._buckets ) * 2 )

    def _A ( self : Tuple ):
        self._resize(len(self._buckets ) // 2 )

    def _A ( self : Tuple , UpperCAmelCase_ : KEY ):
        SCREAMING_SNAKE_CASE : Optional[Any] = self._get_bucket_index(UpperCAmelCase_ )
        for _ in range(len(self._buckets ) ):
            yield ind
            SCREAMING_SNAKE_CASE : Optional[int] = self._get_next_ind(UpperCAmelCase_ )

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
        for ind in self._iterate_buckets(UpperCAmelCase_ ):
            if self._try_set(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
                break

    def __setitem__( self : Optional[int] , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
        if self._is_full():
            self._size_up()
        self._add_item(UpperCAmelCase_ , UpperCAmelCase_ )

    def __delitem__( self : Union[str, Any] , UpperCAmelCase_ : KEY ):
        for ind in self._iterate_buckets(UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE : Optional[int] = self._buckets[ind]
            if item is None:
                raise KeyError(UpperCAmelCase_ )
            if item is _deleted:
                continue
            if item.key == key:
                SCREAMING_SNAKE_CASE : int = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self : Optional[int] , UpperCAmelCase_ : KEY ):
        for ind in self._iterate_buckets(UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(UpperCAmelCase_ )

    def __len__( self : Optional[int] ):
        return self._len

    def __iter__( self : Union[str, Any] ):
        yield from (item.key for item in self._buckets if item)

    def __repr__( self : List[Any] ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = " ,".join(
            f'''{item.key}: {item.val}''' for item in self._buckets if item )
        return f'''HashMap({val_string})'''
319
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] snake_case = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" ) return sd def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE : Optional[Any] = key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE : str = "pretraining" if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512} SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048} SCREAMING_SNAKE_CASE : Any = "vqa_advanced" elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129} SCREAMING_SNAKE_CASE : Tuple = "vqa" elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : int = { "visual_embedding_dim": 1024, "num_labels": 2, } SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr" SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase ) # Load State Dict SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": 
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") snake_case = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
319
1
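The first file in this row is an open-addressing hash map with linear probing and a tombstone sentinel. A usage sketch, assuming the upstream names (`HashMap`, parameter `initial_block_size`) behind the anonymized identifiers:

# Hypothetical usage of the MutableMapping subclass above.
hm = HashMap(initial_block_size=8)
for i in range(20):          # inserting past 8 * 0.75 items triggers _size_up()
    hm[f"key{i}"] = i
assert hm["key7"] == 7 and len(hm) == 20
del hm["key7"]               # marks the slot with the _deleted tombstone
assert "key7" not in hm      # MutableMapping derives __contains__ from __getitem__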
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs snake_case = imread(r"""digital_image_processing/image_data/lena_small.jpg""") snake_case = cvtColor(img, COLOR_BGR2GRAY) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = cn.convert_to_negative(lowercase ) # assert negative_img array for at least one True assert negative_img.any() def lowerCamelCase__ ( ): """simple docstring""" with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase , 110 ) ).startswith( "<PIL.Image.Image image mode=RGB size=100x100 at" ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = imread("digital_image_processing/image_data/lena_small.jpg" , 0 ) # assert ambiguous array for all == True assert canny_img.all() SCREAMING_SNAKE_CASE : int = canny.canny(lowercase ) # assert canny array for at least one True assert canny_array.any() def lowerCamelCase__ ( ): """simple docstring""" assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all() def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) SCREAMING_SNAKE_CASE : Tuple = conv.img_convolve(lowercase , lowercase ).astype(lowercase ) assert res.any() def lowerCamelCase__ ( ): """simple docstring""" assert med.median_filter(lowercase , 3 ).any() def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sob.sobel_filter(lowercase ) assert grad.any() and theta.any() def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = sp.make_sepia(lowercase , 20 ) assert sepia.all() def lowerCamelCase__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = bs.Burkes(imread(lowercase , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def lowerCamelCase__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = rs.NearestNeighbour(imread(lowercase , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. 
SCREAMING_SNAKE_CASE : Dict = imread(lowercase , 0 ) # Test for get_neighbors_pixel function() return not None SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : List[str] = image[x_coordinate][y_coordinate] SCREAMING_SNAKE_CASE : Optional[Any] = lbp.get_neighbors_pixel( lowercase , lowercase , lowercase , lowercase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): SCREAMING_SNAKE_CASE : List[Any] = lbp.local_binary_value(lowercase , lowercase , lowercase ) assert lbp_image.any()
319
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    UpperCamelCase_ : Dict = '''ClapFeatureExtractor'''
    UpperCamelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')

    def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
        super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )

    def __call__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple ):
        SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("sampling_rate" , UpperCAmelCase_ )

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )

        if text is not None:
            SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )

        if audios is not None:
            SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor(
                UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )

        if text is not None and audios is not None:
            SCREAMING_SNAKE_CASE : Optional[Any] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )

    def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ):
        return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )

    def _A ( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ):
        return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )

    @property
    def _A ( self : str ):
        SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
319
1
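For context, the processor in the second file of this row is transformers' `ClapProcessor`, which bundles the Roberta tokenizer with the CLAP feature extractor. A typical call looks roughly like this (the checkpoint name is illustrative, and `raw_waveform` is an assumed float array at CLAP's 48 kHz rate):

from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # illustrative checkpoint
inputs = processor(text=["a dog barking"], audios=[raw_waveform], sampling_rate=48_000, return_tensors="pt")
# `inputs` now holds both the tokenized text and `input_features` for the audio tower.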
import math def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [True] * n SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : str = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): SCREAMING_SNAKE_CASE : str = i * 2 while index < n: SCREAMING_SNAKE_CASE : List[str] = False SCREAMING_SNAKE_CASE : str = index + i SCREAMING_SNAKE_CASE : Any = [2] for i in range(3 , lowercase , 2 ): if is_prime[i]: primes.append(lowercase ) return primes def lowerCamelCase__ ( lowercase = 999966663333 ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = math.floor(math.sqrt(lowercase ) ) + 100 SCREAMING_SNAKE_CASE : Union[str, Any] = prime_sieve(lowercase ) SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : List[str] = primes[prime_index] while (last_prime**2) <= limit: SCREAMING_SNAKE_CASE : Any = primes[prime_index + 1] SCREAMING_SNAKE_CASE : str = last_prime**2 SCREAMING_SNAKE_CASE : Any = next_prime**2 # Get numbers divisible by lps(current) SCREAMING_SNAKE_CASE : Tuple = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) SCREAMING_SNAKE_CASE : Optional[int] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps SCREAMING_SNAKE_CASE : Dict = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair SCREAMING_SNAKE_CASE : List[str] = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
319
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache" SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Optional[int] = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path elif issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ): """simple docstring""" assert isinstance(lowercase , lowercase ) for split in splits: SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split] 
assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "cache" SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : str = ParquetDatasetReader( {"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Tuple = "train" SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path} SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) SCREAMING_SNAKE_CASE : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} ) SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert get_writer_batch_size(lowercase ) == expected
319
1
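A quick check of the odd-only sieve helper in the first file of this row (upstream name `prime_sieve`, anonymized above) — it seeds the list with 2 and then scans only odd candidates:

assert prime_sieve(10) == [2, 3, 5, 7]
assert prime_sieve(30)[-1] == 29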
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter snake_case = """Create a default config file for Accelerate with only a few flags set.""" def lowerCamelCase__ ( lowercase="no" , lowercase = default_json_config_file , lowercase = False ): """simple docstring""" SCREAMING_SNAKE_CASE : int = Path(lowercase ) path.parent.mkdir(parents=lowercase , exist_ok=lowercase ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False SCREAMING_SNAKE_CASE : Any = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) SCREAMING_SNAKE_CASE : Any = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if torch.cuda.is_available(): SCREAMING_SNAKE_CASE : Any = torch.cuda.device_count() SCREAMING_SNAKE_CASE : Dict = num_gpus SCREAMING_SNAKE_CASE : Dict = False if num_gpus > 1: SCREAMING_SNAKE_CASE : Any = "MULTI_GPU" else: SCREAMING_SNAKE_CASE : List[Any] = "NO" elif is_xpu_available() and use_xpu: SCREAMING_SNAKE_CASE : Optional[Any] = torch.xpu.device_count() SCREAMING_SNAKE_CASE : List[str] = num_xpus SCREAMING_SNAKE_CASE : List[str] = False if num_xpus > 1: SCREAMING_SNAKE_CASE : Tuple = "MULTI_XPU" else: SCREAMING_SNAKE_CASE : Optional[Any] = "NO" elif is_npu_available(): SCREAMING_SNAKE_CASE : Union[str, Any] = torch.npu.device_count() SCREAMING_SNAKE_CASE : Optional[int] = num_npus SCREAMING_SNAKE_CASE : Tuple = False if num_npus > 1: SCREAMING_SNAKE_CASE : List[str] = "MULTI_NPU" else: SCREAMING_SNAKE_CASE : Dict = "NO" else: SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : Tuple = 1 SCREAMING_SNAKE_CASE : Optional[Any] = "NO" SCREAMING_SNAKE_CASE : Any = ClusterConfig(**lowercase ) config.to_json_file(lowercase ) return path def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = parser.add_parser("default" , parents=lowercase , help=lowercase , formatter_class=lowercase ) parser.add_argument( "--config_file" , default=lowercase , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , dest="save_location" , ) parser.add_argument( "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowercase , help="Whether or not to use mixed precision training. 
" "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , ) parser.set_defaults(func=lowercase ) return parser def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
319
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FocalNetForImageClassification""", """FocalNetForMaskedImageModeling""", """FocalNetBackbone""", """FocalNetModel""", """FocalNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
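The subcommand wired up in the first file of this row is also callable programmatically; a sketch, assuming the function's upstream name `write_basic_config` and its re-export from `accelerate.utils`:

from accelerate.utils import write_basic_config

# Writes a minimal default_config.yaml (single machine, auto-detected devices);
# returns False instead of overwriting if a config already exists.
write_basic_config(mixed_precision="fp16")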
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase=True , lowercase="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : int = {"add_prefix_space": True} if isinstance(lowercase , lowercase ) and not line.startswith(" " ) else {} SCREAMING_SNAKE_CASE : Optional[int] = padding_side return tokenizer( [line] , max_length=lowercase , padding="max_length" if pad_to_max_length else None , truncation=lowercase , return_tensors=lowercase , add_special_tokens=lowercase , **lowercase , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=None , ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = input_ids.ne(lowercase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict="train" , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[str]="" , ): super().__init__() SCREAMING_SNAKE_CASE : List[Any] = Path(UpperCAmelCase_ ).joinpath(type_path + ".source" ) SCREAMING_SNAKE_CASE : Tuple = Path(UpperCAmelCase_ ).joinpath(type_path + ".target" ) SCREAMING_SNAKE_CASE : Dict = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : Tuple = max_source_length SCREAMING_SNAKE_CASE : Union[str, Any] = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' SCREAMING_SNAKE_CASE : Optional[int] = tokenizer SCREAMING_SNAKE_CASE : List[str] = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : List[Any] = tgt_lang def __len__( self : Any ): return len(self.src_lens ) def __getitem__( self : Tuple , UpperCAmelCase_ : Dict ): SCREAMING_SNAKE_CASE : str = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : Tuple = self.prefix + linecache.getline(str(self.src_file ) , UpperCAmelCase_ ).rstrip("\n" ) SCREAMING_SNAKE_CASE : int = linecache.getline(str(self.tgt_file ) , UpperCAmelCase_ ).rstrip("\n" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , UpperCAmelCase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Tuple = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCAmelCase_ ) else self.tokenizer ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , UpperCAmelCase_ ) else self.tokenizer SCREAMING_SNAKE_CASE : Any = encode_line(UpperCAmelCase_ , UpperCAmelCase_ , self.max_source_length , "right" ) SCREAMING_SNAKE_CASE : Dict = encode_line(UpperCAmelCase_ , UpperCAmelCase_ , self.max_target_length , "right" ) SCREAMING_SNAKE_CASE : Optional[Any] = 
source_inputs["input_ids"].squeeze() SCREAMING_SNAKE_CASE : int = target_inputs["input_ids"].squeeze() SCREAMING_SNAKE_CASE : int = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _A ( UpperCAmelCase_ : Optional[Any] ): return [len(UpperCAmelCase_ ) for x in Path(UpperCAmelCase_ ).open().readlines()] def _A ( self : int , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : Optional[int] = torch.stack([x["input_ids"] for x in batch] ) SCREAMING_SNAKE_CASE : str = torch.stack([x["attention_mask"] for x in batch] ) SCREAMING_SNAKE_CASE : Tuple = torch.stack([x["decoder_input_ids"] for x in batch] ) SCREAMING_SNAKE_CASE : List[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , UpperCAmelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : str = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , UpperCAmelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Any = trim_batch(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(UpperCAmelCase_ , UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch snake_case = getLogger(__name__) def lowerCamelCase__ ( lowercase ): """simple docstring""" return list(itertools.chain.from_iterable(lowercase ) ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = get_git_info() save_json(lowercase , os.path.join(lowercase , "git_log.json" ) ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=4 , **lowercase ): """simple docstring""" with open(lowercase , "w" ) as f: json.dump(lowercase , lowercase , indent=lowercase , **lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" with open(lowercase ) as f: return json.load(lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = git.Repo(search_parent_directories=lowercase ) SCREAMING_SNAKE_CASE : List[str] = { "repo_id": str(lowercase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" return list(map(lowercase , lowercase ) ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" with open(lowercase , "wb" ) as f: return pickle.dump(lowercase , lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" def remove_articles(lowercase ): return re.sub(R"\b(a|an|the)\b" , " " , lowercase ) def white_space_fix(lowercase ): return " ".join(text.split() ) def remove_punc(lowercase ): SCREAMING_SNAKE_CASE : Any = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowercase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowercase ) ) ) ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = normalize_answer(lowercase ).split() SCREAMING_SNAKE_CASE : int = normalize_answer(lowercase ).split() SCREAMING_SNAKE_CASE : Tuple = Counter(lowercase ) & Counter(lowercase ) SCREAMING_SNAKE_CASE : Dict = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : Tuple = 1.0 * num_same / len(lowercase ) SCREAMING_SNAKE_CASE : str = 1.0 * num_same / len(lowercase 
) SCREAMING_SNAKE_CASE : Optional[int] = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" return normalize_answer(lowercase ) == normalize_answer(lowercase ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert len(lowercase ) == len(lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, pred in zip(lowercase , lowercase ): em += exact_match_score(lowercase , lowercase ) if len(lowercase ) > 0: em /= len(lowercase ) return {"em": em} def lowerCamelCase__ ( lowercase ): """simple docstring""" return model_prefix.startswith("rag" ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : Optional[Any] = "dropout_rate" for p in extra_params: if getattr(lowercase , lowercase , lowercase ): if not hasattr(lowercase , lowercase ) and not hasattr(lowercase , equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(lowercase ) ) delattr(lowercase , lowercase ) continue SCREAMING_SNAKE_CASE : Any = p if hasattr(lowercase , lowercase ) else equivalent_param[p] setattr(lowercase , lowercase , getattr(lowercase , lowercase ) ) delattr(lowercase , lowercase ) return hparams, config
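A minimal, de-obfuscated sketch of the token-level F1 metric computed above (names such as `normalize_answer` and `f1_score` are ours; the stored snippet collapses every identifier to `lowerCamelCase__`/`lowercase`):

import re
import string
from collections import Counter


def normalize_answer(text):
    # lowercase, drop punctuation and articles, squeeze whitespace
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


def f1_score(prediction, ground_truth):
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)


# two shared tokens out of 2 predicted / 3 gold -> F1 = 0.8
assert round(f1_score("The cat sat", "a cat sat down"), 2) == 0.8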
319
def lowerCamelCase__ ( lowercase , lowercase = 0 ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = length or len(lowercase )
    SCREAMING_SNAKE_CASE : Optional[Any] = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = list_data[i + 1], list_data[i]
            SCREAMING_SNAKE_CASE : str = True

    return list_data if not swapped else bubble_sort(lowercase , length - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
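A runnable sketch of the recursive bubble sort above with readable names (the names are ours, not the snippet's):

def bubble_sort(list_data, length=0):
    """Recursive bubble sort.

    >>> bubble_sort([5, 1, 4, 2])
    [1, 2, 4, 5]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # a full pass without a swap means the list is already sorted
    return list_data if not swapped else bubble_sort(list_data, length - 1)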
319
1
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    def _A ( self : str ):
        SCREAMING_SNAKE_CASE : int = inspect.getfile(accelerate.test_utils )
        SCREAMING_SNAKE_CASE : str = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        SCREAMING_SNAKE_CASE : List[str] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )

    @require_tpu
    def _A ( self : int ):
        SCREAMING_SNAKE_CASE : int = f'''
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        '''.split()
        SCREAMING_SNAKE_CASE : List[str] = [sys.executable] + distributed_args
        execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() )
319
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


snake_case = get_logger(__name__)

snake_case = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    @add_start_docstrings(UpperCAmelCase_ )
    def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ):
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )


class SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    @add_start_docstrings(UpperCAmelCase_ )
    def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ):
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    @add_start_docstrings(UpperCAmelCase_ )
    def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
        for processor in self:
            SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters
            if len(UpperCAmelCase_ ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
                        f'''{processor.__class__} are passed to the logits processor.''' )
                SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
            else:
                SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        return scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : int , UpperCAmelCase_ : float ):
        if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0):
            raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )

        SCREAMING_SNAKE_CASE : Optional[int] = temperature

    def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : Dict = scores / self.temperature
        return scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ):
        if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
        if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1):
            raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )

        SCREAMING_SNAKE_CASE : Optional[int] = top_p
        SCREAMING_SNAKE_CASE : str = filter_value
        SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep

    def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] )

        SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value )
        SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 )
        SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 )
        score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ )

        # min tokens to keep
        SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1]

        return next_scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ):
        if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0:
            raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )

        SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : int = filter_value

    def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape
        SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value )

        SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] )  # Safety check
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten()
        SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift

        SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ )
        return next_scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : Dict , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : List[str] = bos_token_id

    def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) )

        SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ )

        return scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : Optional[Any] = max_length
        SCREAMING_SNAKE_CASE : Tuple = eos_token_id

    def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) )

        SCREAMING_SNAKE_CASE : str = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ )

        return scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0:
            raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
        if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0:
            raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )

        SCREAMING_SNAKE_CASE : List[str] = min_length
        SCREAMING_SNAKE_CASE : Tuple = eos_token_id

    def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
        # create boolean flag to decide if min length penalty should be applied
        SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ )

        return scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = begin_index

    def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index )
        SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ )

        return scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : List[str] , UpperCAmelCase_ : list ):
        SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ )

    def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) )

        return scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ):
        SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ )

        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ )

    def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
        def _force_token(UpperCAmelCase_ : Tuple ):
            SCREAMING_SNAKE_CASE : List[str] = scores.shape[0]
            SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx]
            SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" )
            SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) )
            return new_scores

        SCREAMING_SNAKE_CASE : Any = lax.cond(
            cur_len >= self.force_token_array.shape[0] ,
            lambda: scores ,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 ,
                lambda: _force_token(UpperCAmelCase_ ) ,
                lambda: scores ,
            ) ,
        )
        return scores


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id
        SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id
        SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1
        SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ):
            SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index
        else:
            SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size

    def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ):
        # suppress <|notimestamps|> which is handled by without_timestamps
        SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )

        def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ):
            SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE : int = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin ,
                True and last_was_timestamp ,
                UpperCAmelCase_ ,
            )

            SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE : List[str] = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin ,
                UpperCAmelCase_ ,
                UpperCAmelCase_ ,
            )

            return jnp.where(
                UpperCAmelCase_ ,
                jnp.where(
                    penultimate_was_timestamp > 0 ,
                    scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) ,
                    scores_k.at[: self.eos_token_id].set(-float("inf" ) ) ,
                ) ,
                UpperCAmelCase_ ,
            )

        SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = jnp.where(
            self.max_initial_timestamp_index is not None ,
            True and apply_max_initial_timestamp ,
            UpperCAmelCase_ ,
        )

        SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index
        SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(
            UpperCAmelCase_ ,
            scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) ,
            UpperCAmelCase_ ,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 )

        def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob ,
                scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) ,
                UpperCAmelCase_ ,
            )

        SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ )

        return scores
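For reference, a self-contained NumPy sketch of the top-p masking logic used by the warper above (plain numpy stands in for jax.numpy so it runs without a JAX install; all names are ours):

import numpy as np


def top_p_filter(scores, top_p=0.9, filter_value=-np.inf):
    # sort descending, accumulate probabilities, keep tokens until mass exceeds top_p
    order = np.argsort(scores)[::-1]
    sorted_scores = scores[order]
    probs = np.exp(sorted_scores - sorted_scores.max())
    probs /= probs.sum()
    keep = probs.cumsum() < top_p
    # shift by one so the token that crosses the top_p boundary is kept too
    keep = np.roll(keep, 1)
    keep[0] = True  # always keep the single most likely token
    filtered = np.where(keep, sorted_scores, filter_value)
    # scatter the filtered scores back to the original token order
    out = np.empty_like(scores)
    out[order] = filtered
    return out


# keeps the two most likely tokens (0.5 + 0.3 >= 0.7), masks the rest
print(top_p_filter(np.log(np.array([0.5, 0.3, 0.1, 0.1])), top_p=0.7))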
319
1
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    """pipelines_utils""",
    """0.22.0""",
    """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
    standard_warn=False,
    stacklevel=3,
)
319
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


snake_case = """3"""

print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())

try:
    import torch

    print("""Torch version:""", torch.__version__)
    print("""Cuda available:""", torch.cuda.is_available())
    print("""Cuda version:""", torch.version.cuda)
    print("""CuDNN version:""", torch.backends.cudnn.version())
    print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
    print("""Torch version:""", None)

try:
    import transformers

    print("""transformers version:""", transformers.__version__)
except ImportError:
    print("""transformers version:""", None)
319
1
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    def __init__( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "geglu" , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = "layer_norm" , UpperCAmelCase_ : bool = False , ):
        super().__init__()
        SCREAMING_SNAKE_CASE : int = only_cross_attention

        SCREAMING_SNAKE_CASE : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        SCREAMING_SNAKE_CASE : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            SCREAMING_SNAKE_CASE : Any = AdaLayerNorm(UpperCAmelCase_ , UpperCAmelCase_ )
        elif self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : Dict = AdaLayerNormZero(UpperCAmelCase_ , UpperCAmelCase_ )
        else:
            SCREAMING_SNAKE_CASE : int = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = Attention(
            query_dim=UpperCAmelCase_ ,
            heads=UpperCAmelCase_ ,
            dim_head=UpperCAmelCase_ ,
            dropout=UpperCAmelCase_ ,
            bias=UpperCAmelCase_ ,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None ,
            upcast_attention=UpperCAmelCase_ ,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            SCREAMING_SNAKE_CASE : List[str] = (
                AdaLayerNorm(UpperCAmelCase_ , UpperCAmelCase_ )
                if self.use_ada_layer_norm
                else nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
            )
            SCREAMING_SNAKE_CASE : int = Attention(
                query_dim=UpperCAmelCase_ ,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None ,
                heads=UpperCAmelCase_ ,
                dim_head=UpperCAmelCase_ ,
                dropout=UpperCAmelCase_ ,
                bias=UpperCAmelCase_ ,
                upcast_attention=UpperCAmelCase_ ,
            )  # is self-attn if encoder_hidden_states is none
        else:
            SCREAMING_SNAKE_CASE : Optional[int] = None
            SCREAMING_SNAKE_CASE : Optional[int] = None

        # 3. Feed-forward
        SCREAMING_SNAKE_CASE : Any = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : str = FeedForward(UpperCAmelCase_ , dropout=UpperCAmelCase_ , activation_fn=UpperCAmelCase_ , final_dropout=UpperCAmelCase_ )

        # let chunk size default to None
        SCREAMING_SNAKE_CASE : int = None
        SCREAMING_SNAKE_CASE : List[str] = 0

    def _A ( self : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ):
        # Sets chunk feed-forward
        SCREAMING_SNAKE_CASE : Union[str, Any] = chunk_size
        SCREAMING_SNAKE_CASE : Tuple = dim

    def _A ( self : str , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.LongTensor] = None , UpperCAmelCase_ : Dict[str, Any] = None , UpperCAmelCase_ : Optional[torch.LongTensor] = None , ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            SCREAMING_SNAKE_CASE : str = self.norma(UpperCAmelCase_ , UpperCAmelCase_ )
        elif self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.norma(
                UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hidden_dtype=hidden_states.dtype )
        else:
            SCREAMING_SNAKE_CASE : Dict = self.norma(UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        SCREAMING_SNAKE_CASE : str = self.attna(
            UpperCAmelCase_ ,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,
            attention_mask=UpperCAmelCase_ ,
            **UpperCAmelCase_ ,
        )
        if self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : Tuple = gate_msa.unsqueeze(1 ) * attn_output
        SCREAMING_SNAKE_CASE : int = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attna is not None:
            SCREAMING_SNAKE_CASE : Optional[Any] = (
                self.norma(UpperCAmelCase_ , UpperCAmelCase_ ) if self.use_ada_layer_norm else self.norma(UpperCAmelCase_ )
            )

            SCREAMING_SNAKE_CASE : Optional[Any] = self.attna(
                UpperCAmelCase_ ,
                encoder_hidden_states=UpperCAmelCase_ ,
                attention_mask=UpperCAmelCase_ ,
                **UpperCAmelCase_ ,
            )
            SCREAMING_SNAKE_CASE : Optional[Any] = attn_output + hidden_states

        # 3. Feed-forward
        SCREAMING_SNAKE_CASE : Dict = self.norma(UpperCAmelCase_ )

        if self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : Any = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )

            SCREAMING_SNAKE_CASE : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            SCREAMING_SNAKE_CASE : int = torch.cat(
                [self.ff(UpperCAmelCase_ ) for hid_slice in norm_hidden_states.chunk(UpperCAmelCase_ , dim=self._chunk_dim )] ,
                dim=self._chunk_dim ,
            )
        else:
            SCREAMING_SNAKE_CASE : int = self.ff(UpperCAmelCase_ )

        if self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : List[Any] = gate_mlp.unsqueeze(1 ) * ff_output

        SCREAMING_SNAKE_CASE : Union[str, Any] = ff_output + hidden_states

        return hidden_states


class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 4 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : str = "geglu" , UpperCAmelCase_ : bool = False , ):
        super().__init__()
        SCREAMING_SNAKE_CASE : Optional[int] = int(dim * mult )
        SCREAMING_SNAKE_CASE : str = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            SCREAMING_SNAKE_CASE : List[str] = GELU(UpperCAmelCase_ , UpperCAmelCase_ )
        if activation_fn == "gelu-approximate":
            SCREAMING_SNAKE_CASE : Dict = GELU(UpperCAmelCase_ , UpperCAmelCase_ , approximate="tanh" )
        elif activation_fn == "geglu":
            SCREAMING_SNAKE_CASE : Tuple = GEGLU(UpperCAmelCase_ , UpperCAmelCase_ )
        elif activation_fn == "geglu-approximate":
            SCREAMING_SNAKE_CASE : str = ApproximateGELU(UpperCAmelCase_ , UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
        # project in
        self.net.append(UpperCAmelCase_ )
        # project dropout
        self.net.append(nn.Dropout(UpperCAmelCase_ ) )
        # project out
        self.net.append(nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(UpperCAmelCase_ ) )

    def _A ( self : Optional[int] , UpperCAmelCase_ : Dict ):
        for module in self.net:
            SCREAMING_SNAKE_CASE : Optional[Any] = module(UpperCAmelCase_ )
        return hidden_states


class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str = "none" ):
        super().__init__()
        SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : str = approximate

    def _A ( self : List[str] , UpperCAmelCase_ : Any ):
        if gate.device.type != "mps":
            return F.gelu(UpperCAmelCase_ , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )

    def _A ( self : Optional[Any] , UpperCAmelCase_ : str ):
        SCREAMING_SNAKE_CASE : List[str] = self.proj(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Any = self.gelu(UpperCAmelCase_ )
        return hidden_states


class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        super().__init__()
        SCREAMING_SNAKE_CASE : Any = nn.Linear(UpperCAmelCase_ , dim_out * 2 )

    def _A ( self : Optional[Any] , UpperCAmelCase_ : Any ):
        if gate.device.type != "mps":
            return F.gelu(UpperCAmelCase_ )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )

    def _A ( self : Any , UpperCAmelCase_ : List[str] ):
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.proj(UpperCAmelCase_ ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(UpperCAmelCase_ )


class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    def __init__( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        super().__init__()
        SCREAMING_SNAKE_CASE : Any = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : Dict , UpperCAmelCase_ : str ):
        SCREAMING_SNAKE_CASE : str = self.proj(UpperCAmelCase_ )
        return x * torch.sigmoid(1.702 * x )


class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    def __init__( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ):
        super().__init__()
        SCREAMING_SNAKE_CASE : Optional[int] = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Any = nn.SiLU()
        SCREAMING_SNAKE_CASE : Dict = nn.Linear(UpperCAmelCase_ , embedding_dim * 2 )
        SCREAMING_SNAKE_CASE : List[Any] = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )

    def _A ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] ):
        SCREAMING_SNAKE_CASE : List[Any] = self.linear(self.silu(self.emb(UpperCAmelCase_ ) ) )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = torch.chunk(UpperCAmelCase_ , 2 )
        SCREAMING_SNAKE_CASE : Tuple = self.norm(UpperCAmelCase_ ) * (1 + scale) + shift
        return x


class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    def __init__( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
        super().__init__()
        SCREAMING_SNAKE_CASE : int = CombinedTimestepLabelEmbeddings(UpperCAmelCase_ , UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = nn.SiLU()
        SCREAMING_SNAKE_CASE : Optional[int] = nn.Linear(UpperCAmelCase_ , 6 * embedding_dim , bias=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ , eps=1E-6 )

    def _A ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int=None ):
        SCREAMING_SNAKE_CASE : Tuple = self.linear(self.silu(self.emb(UpperCAmelCase_ , UpperCAmelCase_ , hidden_dtype=UpperCAmelCase_ ) ) )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = emb.chunk(6 , dim=1 )
        SCREAMING_SNAKE_CASE : List[str] = self.norm(UpperCAmelCase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    def __init__( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : float = 1E-5 ):
        super().__init__()
        SCREAMING_SNAKE_CASE : str = num_groups
        SCREAMING_SNAKE_CASE : Optional[Any] = eps

        if act_fn is None:
            SCREAMING_SNAKE_CASE : Optional[int] = None
        else:
            SCREAMING_SNAKE_CASE : Any = get_activation(UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Dict = nn.Linear(UpperCAmelCase_ , out_dim * 2 )

    def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
        if self.act:
            SCREAMING_SNAKE_CASE : str = self.act(UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : str = self.linear(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : str = emb[:, :, None, None]
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = emb.chunk(2 , dim=1 )

        SCREAMING_SNAKE_CASE : str = F.group_norm(UpperCAmelCase_ , self.num_groups , eps=self.eps )
        SCREAMING_SNAKE_CASE : int = x * (1 + scale) + shift
        return x
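A minimal PyTorch sketch of the x * (1 + scale) + shift modulation pattern that the AdaLayerNorm-style modules above implement (a standalone illustration with our own names, not the diffusers API):

import torch
import torch.nn as nn


class TinyAdaLayerNorm(nn.Module):
    """LayerNorm whose scale/shift are predicted from a learned embedding."""

    def __init__(self, dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, dim)
        self.linear = nn.Linear(dim, 2 * dim)
        # affine params come from the embedding, so the norm itself has none
        self.norm = nn.LayerNorm(dim, elementwise_affine=False)

    def forward(self, x, timestep):
        scale, shift = self.linear(torch.nn.functional.silu(self.emb(timestep))).chunk(2, dim=-1)
        return self.norm(x) * (1 + scale) + shift


block = TinyAdaLayerNorm(dim=8, num_embeddings=10)
out = block(torch.randn(2, 8), torch.tensor([1, 3]))
print(out.shape)  # torch.Size([2, 8])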
319
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    """pipelines_utils""",
    """0.22.0""",
    """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
    standard_warn=False,
    stacklevel=3,
)
319
1
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = args.pruning_method
    SCREAMING_SNAKE_CASE : str = args.threshold

    SCREAMING_SNAKE_CASE : int = args.model_name_or_path.rstrip("/" )
    SCREAMING_SNAKE_CASE : Tuple = args.target_model_path

    print(F'''Load fine-pruned model from {model_name_or_path}''' )
    SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(os.path.join(lowercase , "pytorch_model.bin" ) )
    SCREAMING_SNAKE_CASE : Tuple = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            SCREAMING_SNAKE_CASE : int = tensor
            print(F'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            SCREAMING_SNAKE_CASE : Optional[Any] = tensor
            print(F'''Copied layer {name}''' )
        elif "bias" in name:
            SCREAMING_SNAKE_CASE : List[Any] = tensor
            print(F'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                SCREAMING_SNAKE_CASE : Tuple = MagnitudeBinarizer.apply(inputs=lowercase , threshold=lowercase )
                SCREAMING_SNAKE_CASE : Union[str, Any] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                SCREAMING_SNAKE_CASE : int = name[:-6]
                SCREAMING_SNAKE_CASE : int = model[F'''{prefix_}mask_scores''']
                SCREAMING_SNAKE_CASE : List[Any] = TopKBinarizer.apply(lowercase , lowercase )
                SCREAMING_SNAKE_CASE : List[Any] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                SCREAMING_SNAKE_CASE : List[str] = name[:-6]
                SCREAMING_SNAKE_CASE : Tuple = model[F'''{prefix_}mask_scores''']
                SCREAMING_SNAKE_CASE : List[str] = ThresholdBinarizer.apply(lowercase , lowercase , lowercase )
                SCREAMING_SNAKE_CASE : Union[str, Any] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                SCREAMING_SNAKE_CASE : Any = name[:-6]
                SCREAMING_SNAKE_CASE : Any = model[F'''{prefix_}mask_scores''']
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = -0.1, 1.1
                SCREAMING_SNAKE_CASE : List[Any] = torch.sigmoid(lowercase )
                SCREAMING_SNAKE_CASE : str = s * (r - l) + l
                SCREAMING_SNAKE_CASE : Optional[Any] = s_bar.clamp(min=0.0 , max=1.0 )
                SCREAMING_SNAKE_CASE : Dict = tensor * mask
                print(F'''Pruned layer {name}''' )
            else:
                raise ValueError("Unknown pruning method" )

    if target_model_path is None:
        SCREAMING_SNAKE_CASE : int = os.path.join(
            os.path.dirname(lowercase ) , F'''bertarized_{os.path.basename(lowercase )}''' )

    if not os.path.isdir(lowercase ):
        shutil.copytree(lowercase , lowercase )
        print(F'''\nCreated folder {target_model_path}''' )

    torch.save(lowercase , os.path.join(lowercase , "pytorch_model.bin" ) )
    print("\nPruned model saved! See you later!" )


if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    parser.add_argument(
        """--pruning_method""",
        choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
        type=str,
        required=True,
        help=(
            """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
            """ sigmoied_threshold = Soft movement pruning)"""
        ),
    )
    parser.add_argument(
        """--threshold""",
        type=float,
        required=False,
        help=(
            """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
            """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
            """Not needed for `l0`"""
        ),
    )
    parser.add_argument(
        """--model_name_or_path""",
        type=str,
        required=True,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    parser.add_argument(
        """--target_model_path""",
        default=None,
        type=str,
        required=False,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    snake_case = parser.parse_args()

    main(args)
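A tiny self-contained illustration of the magnitude branch above; following the script's `--threshold` help text, the threshold is interpreted as the fraction of weights kept (names and the exact tie-breaking are ours, not `MagnitudeBinarizer`'s internals):

import torch


def magnitude_prune(tensor, keep_ratio=0.5):
    # keep the `keep_ratio` largest-magnitude weights, zero out the rest
    k = int(tensor.numel() * keep_ratio)
    cutoff = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    mask = (tensor.abs() >= cutoff).to(tensor.dtype)
    return tensor * mask


w = torch.tensor([[0.1, -0.9], [0.4, -0.2]])
print(magnitude_prune(w, keep_ratio=0.5))
# tensor([[ 0.0000, -0.9000],
#         [ 0.4000, -0.0000]])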
319
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)

snake_case = {
    """b0""": efficientnet.EfficientNetBa,
    """b1""": efficientnet.EfficientNetBa,
    """b2""": efficientnet.EfficientNetBa,
    """b3""": efficientnet.EfficientNetBa,
    """b4""": efficientnet.EfficientNetBa,
    """b5""": efficientnet.EfficientNetBa,
    """b6""": efficientnet.EfficientNetBa,
    """b7""": efficientnet.EfficientNetBa,
}

snake_case = {
    """b0""": {
        """hidden_dim""": 1_280,
        """width_coef""": 1.0,
        """depth_coef""": 1.0,
        """image_size""": 224,
        """dropout_rate""": 0.2,
        """dw_padding""": [],
    },
    """b1""": {
        """hidden_dim""": 1_280,
        """width_coef""": 1.0,
        """depth_coef""": 1.1,
        """image_size""": 240,
        """dropout_rate""": 0.2,
        """dw_padding""": [16],
    },
    """b2""": {
        """hidden_dim""": 1_408,
        """width_coef""": 1.1,
        """depth_coef""": 1.2,
        """image_size""": 260,
        """dropout_rate""": 0.3,
        """dw_padding""": [5, 8, 16],
    },
    """b3""": {
        """hidden_dim""": 1_536,
        """width_coef""": 1.2,
        """depth_coef""": 1.4,
        """image_size""": 300,
        """dropout_rate""": 0.3,
        """dw_padding""": [5, 18],
    },
    """b4""": {
        """hidden_dim""": 1_792,
        """width_coef""": 1.4,
        """depth_coef""": 1.8,
        """image_size""": 380,
        """dropout_rate""": 0.4,
        """dw_padding""": [6],
    },
    """b5""": {
        """hidden_dim""": 2_048,
        """width_coef""": 1.6,
        """depth_coef""": 2.2,
        """image_size""": 456,
        """dropout_rate""": 0.4,
        """dw_padding""": [13, 27],
    },
    """b6""": {
        """hidden_dim""": 2_304,
        """width_coef""": 1.8,
        """depth_coef""": 2.6,
        """image_size""": 528,
        """dropout_rate""": 0.5,
        """dw_padding""": [31],
    },
    """b7""": {
        """hidden_dim""": 2_560,
        """width_coef""": 2.0,
        """depth_coef""": 3.1,
        """image_size""": 600,
        """dropout_rate""": 0.5,
        """dw_padding""": [18],
    },
}


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : str = EfficientNetConfig()
    SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"]
    SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"]
    SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"]
    SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
    SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"]
    SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"]

    SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
    SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json"
    SCREAMING_SNAKE_CASE : str = 1000
    SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
    SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()}

    SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
    SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
    return config


def lowerCamelCase__ ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
    return im


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
    SCREAMING_SNAKE_CASE : int = EfficientNetImageProcessor(
        size={"height": size, "width": size} ,
        image_mean=[0.485, 0.456, 0.406] ,
        image_std=[0.47853944, 0.4732864, 0.47434163] ,
        do_center_crop=lowercase ,
    )
    return preprocessor


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) )
    SCREAMING_SNAKE_CASE : List[str] = len(lowercase )
    SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}

    SCREAMING_SNAKE_CASE : Dict = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )

    for b in block_names:
        SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
        rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
        rename_keys.append(
            (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
        rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
        rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
        rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
        rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
        rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
        rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
        rename_keys.append(
            (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
        rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
        rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )

    SCREAMING_SNAKE_CASE : int = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1]

    SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight"
    SCREAMING_SNAKE_CASE : List[str] = "classifier.bias"
    return key_mapping


def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        SCREAMING_SNAKE_CASE : str = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) )
        else:
            SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase )

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(lowercase )


@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name](
        include_top=lowercase ,
        weights="imagenet" ,
        input_tensor=lowercase ,
        input_shape=lowercase ,
        pooling=lowercase ,
        classes=1000 ,
        classifier_activation="softmax" ,
    )

    SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables
    SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables
    SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        SCREAMING_SNAKE_CASE : Tuple = param.numpy()
    SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() )

    # Load HuggingFace model
    SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase )
    SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval()
    SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase )
    replace_params(lowercase , lowercase , lowercase )

    # Initialize preprocessor and preprocess input image
    SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase )
    SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase )
    SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy()

    # Original model inference
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
    SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase )
    SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 )
    SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(lowercase ):
            os.mkdir(lowercase )
        # Save converted model and image processor
        hf_model.save_pretrained(lowercase )
        preprocessor.save_pretrained(lowercase )

    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(lowercase )
        hf_model.push_to_hub(lowercase )


if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""b0""",
        type=str,
        help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""hf_model""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")

    snake_case = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
319
1
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


snake_case = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any] ):
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." ,
            UpperCAmelCase_ ,
        )
        super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
319
def lowerCamelCase__ ( ):
    """simple docstring"""
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]


snake_case = generate_large_matrix()
snake_case = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid )
    assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = 0
    SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2
        SCREAMING_SNAKE_CASE : Optional[int] = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            SCREAMING_SNAKE_CASE : List[Any] = mid + 1
        else:
            SCREAMING_SNAKE_CASE : Dict = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(lowercase )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = 0
    SCREAMING_SNAKE_CASE : List[str] = len(grid[0] )

    for i in range(len(lowercase ) ):
        SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(lowercase ) * len(grid[0] )) - total


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    return len([number for row in grid for number in row if number < 0] )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = 0
    for row in grid:
        for i, number in enumerate(lowercase ):
            if number < 0:
                total += len(lowercase ) - i
                break
    return total


def lowerCamelCase__ ( ):
    """simple docstring"""
    from timeit import timeit

    print("Running benchmarks" )
    SCREAMING_SNAKE_CASE : List[str] = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
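A de-obfuscated sketch of the binary search used above to find the first negative entry of a non-increasing row (the name `find_negative_index` is ours):

def find_negative_index(array):
    """Index of the first negative value in a non-increasing array."""
    left, right = 0, len(array) - 1
    # edge cases: empty row, or every value already negative
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        # the boundary: a negative value preceded by a non-negative one
        if array[mid] < 0 and array[mid - 1] >= 0:
            return mid
        if array[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(array)  # no negative numbers at all


assert find_negative_index([4, 2, 0, -1, -3]) == 3
assert find_negative_index([1, 1]) == 2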
319
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


snake_case = logging.get_logger(__name__)

snake_case = {
    """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
    """YituTech/conv-bert-medium-small""": (
        """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
    ),
    """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    UpperCamelCase_ : List[Any] = '''convbert'''

    def __init__( self : Any , UpperCAmelCase_ : List[Any]=3_0522 , UpperCAmelCase_ : Optional[int]=768 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : Dict=3072 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Optional[int]=768 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[int]=9 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : int , ):
        super().__init__(
            pad_token_id=UpperCAmelCase_ ,
            bos_token_id=UpperCAmelCase_ ,
            eos_token_id=UpperCAmelCase_ ,
            **UpperCAmelCase_ ,
        )

        SCREAMING_SNAKE_CASE : Any = vocab_size
        SCREAMING_SNAKE_CASE : List[Any] = hidden_size
        SCREAMING_SNAKE_CASE : int = num_hidden_layers
        SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
        SCREAMING_SNAKE_CASE : Dict = intermediate_size
        SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
        SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
        SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
        SCREAMING_SNAKE_CASE : Any = initializer_range
        SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
        SCREAMING_SNAKE_CASE : str = embedding_size
        SCREAMING_SNAKE_CASE : Optional[int] = head_ratio
        SCREAMING_SNAKE_CASE : List[Any] = conv_kernel_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = num_groups
        SCREAMING_SNAKE_CASE : int = classifier_dropout


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    @property
    def _A ( self : List[Any] ):
        if self.task == "multiple-choice":
            SCREAMING_SNAKE_CASE : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
319
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


snake_case = ["""small""", """medium""", """large"""]

snake_case = """lm_head.decoder.weight"""
snake_case = """lm_head.weight"""


def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase )
    SCREAMING_SNAKE_CASE : Any = d.pop(lowercase )
    os.makedirs(lowercase , exist_ok=lowercase )
    torch.save(lowercase , os.path.join(lowercase , lowercase ) )


if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    snake_case = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        snake_case = F"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
319
1
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
snake_case = """src/transformers"""
snake_case = """docs/source/en"""
snake_case = """."""


def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
    """simple docstring"""
    with open(lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
        SCREAMING_SNAKE_CASE : int = f.readlines()
    # Find the start prompt.
    SCREAMING_SNAKE_CASE : Dict = 0
    while not lines[start_index].startswith(lowercase ):
        start_index += 1
    start_index += 1

    SCREAMING_SNAKE_CASE : List[str] = start_index
    while not lines[end_index].startswith(lowercase ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
snake_case = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
snake_case = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
snake_case = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
snake_case = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")

# This is to make sure the transformers module imported is the one in the repo.
snake_case = direct_transformers_import(TRANSFORMERS_PATH)


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Dict = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , lowercase )
    return [m.group(0 ) for m in matches]


def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[Any] = 2 if text == "✅" or text == "❌" else len(lowercase )
    SCREAMING_SNAKE_CASE : List[str] = (width - text_length) // 2
    SCREAMING_SNAKE_CASE : Union[str, Any] = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def lowerCamelCase__ ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[str] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    SCREAMING_SNAKE_CASE : Dict = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    SCREAMING_SNAKE_CASE : Dict = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    SCREAMING_SNAKE_CASE : Union[str, Any] = collections.defaultdict(lowercase )
    SCREAMING_SNAKE_CASE : List[Any] = collections.defaultdict(lowercase )
    SCREAMING_SNAKE_CASE : Tuple = collections.defaultdict(lowercase )
    SCREAMING_SNAKE_CASE : List[Any] = collections.defaultdict(lowercase )
    SCREAMING_SNAKE_CASE : List[str] = collections.defaultdict(lowercase )

    # Let's lookup through all transformers object (once).
    for attr_name in dir(lowercase ):
        SCREAMING_SNAKE_CASE : List[Any] = None
        if attr_name.endswith("Tokenizer" ):
            SCREAMING_SNAKE_CASE : str = slow_tokenizers
            SCREAMING_SNAKE_CASE : Any = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast" ):
            SCREAMING_SNAKE_CASE : Tuple = fast_tokenizers
            SCREAMING_SNAKE_CASE : Tuple = attr_name[:-13]
        elif _re_tf_models.match(lowercase ) is not None:
            SCREAMING_SNAKE_CASE : Any = tf_models
            SCREAMING_SNAKE_CASE : int = _re_tf_models.match(lowercase ).groups()[0]
        elif _re_flax_models.match(lowercase ) is not None:
            SCREAMING_SNAKE_CASE : Tuple = flax_models
            SCREAMING_SNAKE_CASE : str = _re_flax_models.match(lowercase ).groups()[0]
        elif _re_pt_models.match(lowercase ) is not None:
            SCREAMING_SNAKE_CASE : Union[str, Any] = pt_models
            SCREAMING_SNAKE_CASE : Optional[Any] = _re_pt_models.match(lowercase ).groups()[0]

        if lookup_dict is not None:
            while len(lowercase ) > 0:
                if attr_name in model_name_to_prefix.values():
                    SCREAMING_SNAKE_CASE : Dict = True
                    break
                # Try again after removing the last word in the name
                SCREAMING_SNAKE_CASE : str = "".join(camel_case_split(lowercase )[:-1] )

    # Let's build that table!
    SCREAMING_SNAKE_CASE : Optional[Any] = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    SCREAMING_SNAKE_CASE : Optional[Any] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    SCREAMING_SNAKE_CASE : Any = [len(lowercase ) + 2 for c in columns]
    SCREAMING_SNAKE_CASE : Optional[int] = max([len(lowercase ) for name in model_names] ) + 2

    # Build the table per se
    SCREAMING_SNAKE_CASE : str = "|" + "|".join([_center_text(lowercase , lowercase ) for c, w in zip(lowercase , lowercase )] ) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"

    SCREAMING_SNAKE_CASE : List[Any] = {True: "✅", False: "❌"}
    for name in model_names:
        SCREAMING_SNAKE_CASE : List[str] = model_name_to_prefix[name]
        SCREAMING_SNAKE_CASE : int = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(lowercase , lowercase ) for l, w in zip(lowercase , lowercase )] ) + "|\n"
    return table


def lowerCamelCase__ ( lowercase=False ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = _find_text_in_file(
        filename=os.path.join(lowercase , "index.md" ) ,
        start_prompt="<!--This table is updated automatically from the auto modules" ,
        end_prompt="<!-- End table-->" ,
    )
    SCREAMING_SNAKE_CASE : List[str] = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(lowercase , "index.md" ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )


if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    snake_case = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
319
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
1
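The `camel_case_split` helper in the table-checking script above relies on a lookahead/lookbehind regex to cut CamelCase names at word boundaries while keeping acronym runs together. A quick sketch of that same pattern in isolation (the example identifier is arbitrary):

import re


def camel_case_split(identifier: str) -> list:
    # Split before each uppercase letter that starts a new word; a run of
    # capitals such as "XYZ" stays together until a lowercase letter follows.
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


print(camel_case_split("CamelCaseXYZTest"))  # ['Camel', 'Case', 'XYZ', 'Test']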
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) snake_case = { """configuration_clip""": [ """CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CLIPConfig""", """CLIPOnnxConfig""", """CLIPTextConfig""", """CLIPVisionConfig""", ], """processing_clip""": ["""CLIPProcessor"""], """tokenization_clip""": ["""CLIPTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""CLIPTokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""CLIPFeatureExtractor"""] snake_case = ["""CLIPImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """CLIPModel""", """CLIPPreTrainedModel""", """CLIPTextModel""", """CLIPTextModelWithProjection""", """CLIPVisionModel""", """CLIPVisionModelWithProjection""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCLIPModel""", """TFCLIPPreTrainedModel""", """TFCLIPTextModel""", """TFCLIPVisionModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FlaxCLIPModel""", """FlaxCLIPPreTrainedModel""", """FlaxCLIPTextModel""", """FlaxCLIPTextPreTrainedModel""", """FlaxCLIPVisionModel""", """FlaxCLIPVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
def or_gate(input_1: int, input_2: int) -> int:
    """Return the output of a two-input OR gate: 1 if at least one input is 1."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
319
1
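The tuple-`count` trick used by `or_gate` generalizes to the other basic gates. A small sketch (these companion gates are not part of the snippet above, just an illustration of the same idiom):

def and_gate(input_1: int, input_2: int) -> int:
    # AND is true exactly when no input is 0.
    return int((input_1, input_2).count(0) == 0)


def nor_gate(input_1: int, input_2: int) -> int:
    # NOR is the negation of OR: true only when every input is 0.
    return int((input_1, input_2).count(1) == 0)


assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]
assert [nor_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 0, 0, 0]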
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): snake_case = """pt""" elif is_tf_available(): snake_case = """tf""" else: snake_case = """jax""" class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : str = ByTaTokenizer UpperCamelCase_ : str = False def _A ( self : Tuple ): super().setUp() SCREAMING_SNAKE_CASE : Tuple = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _A ( self : Tuple ): return ByTaTokenizer.from_pretrained("google/byt5-small" ) def _A ( self : str , **UpperCAmelCase_ : List[Any] ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : int=20 , UpperCAmelCase_ : List[str]=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. SCREAMING_SNAKE_CASE : Any = [] for i in range(len(UpperCAmelCase_ ) ): try: SCREAMING_SNAKE_CASE : int = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) SCREAMING_SNAKE_CASE : Optional[Any] = list(filter(lambda UpperCAmelCase_ : re.match(r"^[ a-zA-Z]+$" , t[1] ) , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : List[Any] = list(filter(lambda UpperCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase_ ) , UpperCAmelCase_ ) ) if max_length is not None and len(UpperCAmelCase_ ) > max_length: SCREAMING_SNAKE_CASE : Dict = toks[:max_length] if min_length is not None and len(UpperCAmelCase_ ) < min_length and len(UpperCAmelCase_ ) > 0: while len(UpperCAmelCase_ ) < min_length: SCREAMING_SNAKE_CASE : Dict = toks + toks # toks_str = [t[1] for t in toks] SCREAMING_SNAKE_CASE : Any = [t[0] for t in toks] # Ensure consistency SCREAMING_SNAKE_CASE : str = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) if " " not in output_txt and len(UpperCAmelCase_ ) > 1: SCREAMING_SNAKE_CASE : Any = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase_ ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase_ ) ) if with_prefix_space: SCREAMING_SNAKE_CASE : int = " " + output_txt SCREAMING_SNAKE_CASE : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) return output_txt, output_ids def _A ( self : Dict ): SCREAMING_SNAKE_CASE : List[str] = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : List[str] = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["hi", "I went to the gym", ""] ) self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] ) def _A ( self : int ): SCREAMING_SNAKE_CASE : Optional[int] = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : List[str] = "Unicode €." 
SCREAMING_SNAKE_CASE : int = tokenizer(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"] , UpperCAmelCase_ ) # decoding SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , "Unicode €.</s>" ) SCREAMING_SNAKE_CASE : str = tokenizer("e è é ê ë" ) SCREAMING_SNAKE_CASE : Optional[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"] , UpperCAmelCase_ ) # decoding SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , "e è é ê ë</s>" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : Any = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off SCREAMING_SNAKE_CASE : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) if FRAMEWORK != "jax": SCREAMING_SNAKE_CASE : Union[str, Any] = list(batch.input_ids.numpy()[0] ) else: SCREAMING_SNAKE_CASE : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def _A ( self : int ): SCREAMING_SNAKE_CASE : List[str] = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids" , UpperCAmelCase_ ) self.assertIn("attention_mask" , UpperCAmelCase_ ) self.assertNotIn("decoder_input_ids" , UpperCAmelCase_ ) self.assertNotIn("decoder_attention_mask" , UpperCAmelCase_ ) def _A ( self : str ): SCREAMING_SNAKE_CASE : Optional[Any] = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : Union[str, Any] = [ "Summary of the text.", "Another summary.", ] SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer( text_target=UpperCAmelCase_ , max_length=32 , padding="max_length" , truncation=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : str = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : Any = ["A long paragraph for summarization. </s>"] SCREAMING_SNAKE_CASE : Dict = ["Summary of the text. 
</s>"] # fmt: off SCREAMING_SNAKE_CASE : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] SCREAMING_SNAKE_CASE : List[str] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on SCREAMING_SNAKE_CASE : int = tokenizer(UpperCAmelCase_ , text_target=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , batch["input_ids"][0] ) self.assertEqual(UpperCAmelCase_ , batch["labels"][0] ) def _A ( self : Optional[Any] ): # safety check on max_len default value so we are sure the test works SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : List[Any] = " He is very happy, UNwant\u00E9d,running" SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) tokenizer.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) shutil.rmtree(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Tuple = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) SCREAMING_SNAKE_CASE : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) tokenizer.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = tokenizer.__class__.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) SCREAMING_SNAKE_CASE : int = tokenizer.__class__.from_pretrained(UpperCAmelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCAmelCase_ ) def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_ , 
"special_tokens_map.json" ) , encoding="utf-8" ) as json_file: SCREAMING_SNAKE_CASE : List[Any] = json.load(UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: SCREAMING_SNAKE_CASE : List[str] = json.load(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = [f'''<extra_id_{i}>''' for i in range(125 )] SCREAMING_SNAKE_CASE : List[str] = added_tokens_extra_ids + [ "an_additional_special_token" ] SCREAMING_SNAKE_CASE : Optional[int] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(UpperCAmelCase_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_class.from_pretrained( UpperCAmelCase_ , ) self.assertIn( "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained SCREAMING_SNAKE_CASE : str = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCAmelCase_ )] SCREAMING_SNAKE_CASE : Dict = tokenizer_class.from_pretrained( UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , ) self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , ) def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : Any = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_class.from_pretrained(UpperCAmelCase_ ) self.assertTrue(tokenizer.decode([255] ) == "" ) def _A ( self : Optional[int] ): pass def _A ( self : Optional[Any] ): pass def _A ( self : Tuple ): pass def _A ( self : Optional[int] ): pass def _A ( self : Tuple ): # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizers(fast=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE : Optional[int] = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_string(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , 
UpperCAmelCase_ ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE : Optional[int] = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens( UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) for attr in attributes_list: setattr(UpperCAmelCase_ , attr + "_id" , UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_ , attr + "_id" ) , UpperCAmelCase_ ) setattr(UpperCAmelCase_ , attr + "_id" , UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_ , attr + "_id" ) , UpperCAmelCase_ ) setattr(UpperCAmelCase_ , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(UpperCAmelCase_ , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(UpperCAmelCase_ , "additional_special_tokens_ids" ) , [] ) setattr(UpperCAmelCase_ , "additional_special_tokens_ids" , [token_id_to_test_setters] ) self.assertListEqual(getattr(UpperCAmelCase_ , "additional_special_tokens" ) , [token_to_test_setters] ) self.assertListEqual(getattr(UpperCAmelCase_ , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
319
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize a union-find structure from a list of initial set sizes."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing src and dst by rank; return False if they already share a set."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of the set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
319
1
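A short usage sketch for the disjoint-set class above, assuming the names from the cleaned-up listing (`DisjointSet`, `merge`, `get_parent`; `get_parent` follows the call sites in the snippet, `merge` is a conventional name for the union method):

# Four singleton sets, each of initial size 1.
ds = DisjointSet([1, 1, 1, 1])

assert ds.merge(0, 1) is True    # {0, 1} now form one set of size 2
assert ds.merge(2, 3) is True    # {2, 3} form another set of size 2
assert ds.merge(0, 1) is False   # already in the same set, nothing to do
assert ds.merge(1, 3) is True    # union of both pairs: one set of size 4

assert ds.get_parent(0) == ds.get_parent(3)  # same representative after compression
assert ds.max_set == 4                       # largest set seen so far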
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) def lowerCamelCase__ ( lowercase , lowercase=False , lowercase=False , lowercase=False ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') ) # embeddings rename_keys.extend( [ # text embeddings ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"), ( "text_embeddings.position_embeddings.weight", "vilt.embeddings.text_embeddings.position_embeddings.weight", ), ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"), ( "text_embeddings.token_type_embeddings.weight", "vilt.embeddings.text_embeddings.token_type_embeddings.weight", ), ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"), ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"), # patch embeddings ("transformer.cls_token", "vilt.embeddings.cls_token"), ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"), ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"), ("transformer.pos_embed", "vilt.embeddings.position_embeddings"), # token type embeddings ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"), ] ) # final layernorm + pooler rename_keys.extend( [ ("transformer.norm.weight", "vilt.layernorm.weight"), ("transformer.norm.bias", "vilt.layernorm.bias"), ("pooler.dense.weight", "vilt.pooler.dense.weight"), ("pooler.dense.bias", "vilt.pooler.dense.bias"), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ("vqa_classifier.0.weight", "classifier.0.weight"), ("vqa_classifier.0.bias", "classifier.0.bias"), ("vqa_classifier.1.weight", "classifier.1.weight"), ("vqa_classifier.1.bias", 
"classifier.1.bias"), ("vqa_classifier.3.weight", "classifier.3.weight"), ("vqa_classifier.3.bias", "classifier.3.bias"), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ("nlvr2_classifier.0.weight", "classifier.0.weight"), ("nlvr2_classifier.0.bias", "classifier.0.bias"), ("nlvr2_classifier.1.weight", "classifier.1.weight"), ("nlvr2_classifier.1.bias", "classifier.1.bias"), ("nlvr2_classifier.3.weight", "classifier.3.weight"), ("nlvr2_classifier.3.bias", "classifier.3.bias"), ] ) else: pass return rename_keys def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" for i in range(config.num_hidden_layers ): SCREAMING_SNAKE_CASE : int = "vilt." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Dict = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE : str = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : str = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE : int = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE : int = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = dct.pop(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = val @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=lowercase ) SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : str = False SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Tuple = False if "vqa" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : Tuple = 3129 SCREAMING_SNAKE_CASE : Union[str, Any] = "huggingface/label-files" SCREAMING_SNAKE_CASE : int = "vqa2-id2label.json" SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Optional[Any] = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : List[str] = idalabel SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Tuple = ViltForQuestionAnswering(lowercase ) elif "nlvr" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : List[str] = 2 SCREAMING_SNAKE_CASE : List[Any] = {0: "False", 1: "True"} SCREAMING_SNAKE_CASE : Any = {v: k for k, v in config.idalabel.items()} SCREAMING_SNAKE_CASE : Any = 3 SCREAMING_SNAKE_CASE : List[Any] = ViltForImagesAndTextClassification(lowercase ) elif "irtr" in checkpoint_url: SCREAMING_SNAKE_CASE : Dict = True SCREAMING_SNAKE_CASE : List[Any] = ViltForImageAndTextRetrieval(lowercase ) elif "mlm_itm" in checkpoint_url: SCREAMING_SNAKE_CASE : List[Any] = True SCREAMING_SNAKE_CASE : Any = ViltForMaskedLM(lowercase ) else: raise ValueError("Unknown model type" ) # load state_dict of original model, remove and rename 
some keys SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" )["state_dict"] SCREAMING_SNAKE_CASE : int = create_rename_keys(lowercase , lowercase , lowercase , lowercase ) for src, dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) read_in_q_k_v(lowercase , lowercase ) if mlm_model or irtr_model: SCREAMING_SNAKE_CASE : Optional[int] = ["itm_score.fc.weight", "itm_score.fc.bias"] for k in ignore_keys: state_dict.pop(lowercase , lowercase ) # load state dict into HuggingFace model model.eval() if mlm_model: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = model.load_state_dict(lowercase , strict=lowercase ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(lowercase ) # Define processor SCREAMING_SNAKE_CASE : Tuple = ViltImageProcessor(size=384 ) SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained("bert-base-uncased" ) SCREAMING_SNAKE_CASE : List[str] = ViltProcessor(lowercase , lowercase ) # Forward pass on example inputs (image + text) if nlvr_model: SCREAMING_SNAKE_CASE : Optional[Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=lowercase ).raw ) SCREAMING_SNAKE_CASE : Optional[int] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=lowercase ).raw ) SCREAMING_SNAKE_CASE : Union[str, Any] = ( "The left image contains twice the number of dogs as the right image, and at least two dogs in total are" " standing." ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(lowercase , lowercase , return_tensors="pt" ) SCREAMING_SNAKE_CASE : Optional[int] = processor(lowercase , lowercase , return_tensors="pt" ) SCREAMING_SNAKE_CASE : Dict = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: SCREAMING_SNAKE_CASE : Any = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=lowercase ).raw ) if mlm_model: SCREAMING_SNAKE_CASE : Dict = "a bunch of [MASK] laying on a [MASK]." else: SCREAMING_SNAKE_CASE : str = "How many cats are there?" 
SCREAMING_SNAKE_CASE : List[Any] = processor(lowercase , lowercase , return_tensors="pt" ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(**lowercase ) # Verify outputs if mlm_model: SCREAMING_SNAKE_CASE : List[Any] = torch.Size([1, 11, 30522] ) SCREAMING_SNAKE_CASE : Any = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowercase , atol=1E-4 ) # verify masked token prediction equals "cats" SCREAMING_SNAKE_CASE : str = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3129] ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowercase , atol=1E-4 ) # verify vqa prediction equals "2" SCREAMING_SNAKE_CASE : List[str] = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size([1, 2] ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(lowercase ).mkdir(exist_ok=lowercase ) print(F'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) processor.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
319
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration class for a backbone loaded from the timm library."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
319
1
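The `read_in_q_k_v` step in the ViLT conversion above splits timm's fused `qkv` projection into the separate query/key/value weights that the converted attention layers expect. A standalone sketch of that slicing with a toy hidden size (the shapes here are illustrative; ViLT itself uses a much larger hidden size):

import torch

hidden_size = 8  # toy value for illustration
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused q, k, v rows
in_proj_bias = torch.randn(3 * hidden_size)

# Queries, keys, and values are stacked along dim 0 in that order.
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : hidden_size * 2]
v_b = in_proj_bias[-hidden_size:]

# Re-stacking the slices reproduces the fused tensors exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), in_proj_bias)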
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenate consecutive examples while the tokenized result still fits in max_tokens."""
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    """Pack the train split of a seq2seq data directory; copy val/test through unchanged."""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
319
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below n (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
319
1
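As a concrete check of the amicable-number logic above: 220 and 284 form the classic amicable pair, so both are counted, while a perfect number such as 6 maps to itself and is excluded by the `sum_of_divisors(i) != i` guard.

assert sum_of_divisors(220) == 284  # 1+2+4+5+10+11+20+22+44+55+110
assert sum_of_divisors(284) == 220  # 1+2+4+71+142
assert sum_of_divisors(6) == 6      # perfect, hence not amicable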
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right snake_case = 250_004 snake_case = 250_020 @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = MBartTokenizer UpperCamelCase_ : Dict = MBartTokenizerFast UpperCamelCase_ : Optional[int] = True UpperCamelCase_ : Optional[int] = True def _A ( self : Union[str, Any] ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : List[Any] = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self : Dict ): SCREAMING_SNAKE_CASE : Union[str, Any] = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def _A ( self : Dict ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return SCREAMING_SNAKE_CASE : Union[str, Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files + the 
tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE : List[str] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : int = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : str = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = '''facebook/mbart-large-en-ro''' UpperCamelCase_ : Any = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] UpperCamelCase_ : Optional[Any] = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] UpperCamelCase_ : List[Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 
1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def _A ( cls : Union[str, Any] ): SCREAMING_SNAKE_CASE : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) SCREAMING_SNAKE_CASE : int = 1 return cls def _A ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_0020 ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ ) def _A ( self : Tuple ): self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids ) SCREAMING_SNAKE_CASE : Tuple = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = 10 SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) def _A ( self : Optional[int] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_0026, 25_0001] ) def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = MBartTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ ) @require_torch def _A ( self : int ): SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors="pt" ) SCREAMING_SNAKE_CASE : int = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) SCREAMING_SNAKE_CASE : str = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE : Dict = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset 
self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _A ( self : Dict ): SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors="pt" ) SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer( text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors="pt" ) SCREAMING_SNAKE_CASE : Tuple = targets["input_ids"] SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(UpperCAmelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _A ( self : str ): SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 25_0004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 25_0001, } , )
319
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
1
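The `_import_structure` / `_LazyModule` arrangement above (also used by the CLIP and MLuke `__init__` files earlier) defers the heavy submodule imports until an attribute is first accessed. A minimal sketch of the same idea using only the standard library, not the Transformers implementation itself (class and attribute names here are made up for illustration):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported symbols to their submodules on first attribute access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map exported symbol -> submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so __getattr__ only runs once per symbol
        return value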
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging snake_case = logging.get_logger(__name__) if is_vision_available(): import PIL class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ['''pixel_values'''] def __init__( self : List[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : bool = True , **UpperCAmelCase_ : Optional[Any] , ): super().__init__(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {"shortest_edge": 224} SCREAMING_SNAKE_CASE : Tuple = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {"height": 224, "width": 224} SCREAMING_SNAKE_CASE : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ , param_name="crop_size" ) SCREAMING_SNAKE_CASE : Optional[Any] = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = resample SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop SCREAMING_SNAKE_CASE : Optional[Any] = crop_size SCREAMING_SNAKE_CASE : str = do_rescale SCREAMING_SNAKE_CASE : Any = rescale_factor SCREAMING_SNAKE_CASE : Dict = do_normalize SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN SCREAMING_SNAKE_CASE : str = image_std if image_std is not None else OPENAI_CLIP_STD SCREAMING_SNAKE_CASE : Dict = do_convert_rgb def _A ( self : int , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Tuple , ): SCREAMING_SNAKE_CASE : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size["shortest_edge"] , default_to_square=UpperCAmelCase_ ) return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : str , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : int , ): SCREAMING_SNAKE_CASE : List[str] = get_size_dict(UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCAmelCase_ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : str , ): return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ): return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : Dict , ): SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE : Dict = size if size is not None else self.size SCREAMING_SNAKE_CASE : Dict = get_size_dict(UpperCAmelCase_ , param_name="size" , default_to_square=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE : Any = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE : Dict = get_size_dict(UpperCAmelCase_ , param_name="crop_size" , default_to_square=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE : Optional[Any] = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE : Tuple = make_list_of_images(UpperCAmelCase_ ) if not valid_images(UpperCAmelCase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." 
) # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE : int = [convert_to_rgb(UpperCAmelCase_ ) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE : List[Any] = [to_numpy_array(UpperCAmelCase_ ) for image in images] if do_resize: SCREAMING_SNAKE_CASE : int = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE : List[str] = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE : List[str] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images] SCREAMING_SNAKE_CASE : int = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images] SCREAMING_SNAKE_CASE : List[Any] = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
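# Hedged usage sketch for the image processor above: it mirrors transformers'
# CLIPImageProcessor pipeline (resize -> center crop -> rescale -> normalize),
# so the public API can be exercised like this. Assumes `transformers` and
# `Pillow` are installed; all values below are illustrative.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype("uint8"))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)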
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
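# Hedged usage sketch for the fast Pegasus tokenizer above: it only appends the
# eos token rather than wrapping sequences (see build_inputs_with_special_tokens).
# Assumes `transformers`, `tokenizers` and hub access; the checkpoint is the one
# referenced in the file.
from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tok("Summarize this article.").input_ids
assert ids[-1] == tok.eos_token_id  # a single eos is appended, nothing prepended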
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } snake_case = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = '''mask2former''' UpperCamelCase_ : Optional[Any] = ['''swin'''] UpperCamelCase_ : List[Any] = {'''hidden_size''': '''hidden_dim'''} def __init__( self : List[Any] , UpperCAmelCase_ : Optional[Dict] = None , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : int = 1024 , UpperCAmelCase_ : str = "relu" , UpperCAmelCase_ : int = 6 , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 2048 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 4 , UpperCAmelCase_ : int = 255 , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 2.0 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : int = 1_2544 , UpperCAmelCase_ : float = 3.0 , UpperCAmelCase_ : float = 0.75 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : List[int] = [4, 8, 16, 32] , UpperCAmelCase_ : bool = None , **UpperCAmelCase_ : List[str] , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." ) SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAPPING["swin"]( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=UpperCAmelCase_ , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = backbone_config.pop("model_type" ) SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : List[Any] = config_class.from_dict(UpperCAmelCase_ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
''' f'''Supported model types: {','.join(self.backbones_supported )}''' ) SCREAMING_SNAKE_CASE : List[str] = backbone_config SCREAMING_SNAKE_CASE : Optional[int] = feature_size SCREAMING_SNAKE_CASE : Union[str, Any] = mask_feature_size SCREAMING_SNAKE_CASE : str = hidden_dim SCREAMING_SNAKE_CASE : Tuple = encoder_feedforward_dim SCREAMING_SNAKE_CASE : Optional[int] = activation_function SCREAMING_SNAKE_CASE : Optional[int] = encoder_layers SCREAMING_SNAKE_CASE : str = decoder_layers SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE : str = dropout SCREAMING_SNAKE_CASE : List[str] = dim_feedforward SCREAMING_SNAKE_CASE : List[Any] = pre_norm SCREAMING_SNAKE_CASE : List[str] = enforce_input_projection SCREAMING_SNAKE_CASE : List[Any] = common_stride SCREAMING_SNAKE_CASE : int = ignore_value SCREAMING_SNAKE_CASE : List[Any] = num_queries SCREAMING_SNAKE_CASE : Any = no_object_weight SCREAMING_SNAKE_CASE : List[Any] = class_weight SCREAMING_SNAKE_CASE : List[Any] = mask_weight SCREAMING_SNAKE_CASE : Dict = dice_weight SCREAMING_SNAKE_CASE : Any = train_num_points SCREAMING_SNAKE_CASE : Tuple = oversample_ratio SCREAMING_SNAKE_CASE : Optional[int] = importance_sample_ratio SCREAMING_SNAKE_CASE : List[Any] = init_std SCREAMING_SNAKE_CASE : List[Any] = init_xavier_std SCREAMING_SNAKE_CASE : int = use_auxiliary_loss SCREAMING_SNAKE_CASE : Dict = feature_strides SCREAMING_SNAKE_CASE : Optional[Any] = output_auxiliary_logits SCREAMING_SNAKE_CASE : Tuple = decoder_layers super().__init__(**UpperCAmelCase_ ) @classmethod def _A ( cls : str , UpperCAmelCase_ : PretrainedConfig , **UpperCAmelCase_ : int ): return cls( backbone_config=UpperCAmelCase_ , **UpperCAmelCase_ , ) def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : int = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : Tuple = self.__class__.model_type return output
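# Hedged sketch of constructing the config above: with no backbone_config it
# falls back to a default Swin backbone, and `from_backbone_config` wraps an
# existing backbone config. Assumes `transformers` is installed.
from transformers import Mask2FormerConfig, SwinConfig

config = Mask2FormerConfig()  # logs the fallback to the default Swin backbone
custom = Mask2FormerConfig.from_backbone_config(SwinConfig(embed_dim=96))
print(custom.backbone_config.model_type)  # "swin"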
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case = 16 snake_case = 32 def lowerCamelCase__ ( lowercase , lowercase = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE : List[Any] = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE : str = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE : Optional[Any] = 8 else: SCREAMING_SNAKE_CASE : Union[str, Any] = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case = mocked_dataloaders # noqa: F811 def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1": SCREAMING_SNAKE_CASE : int = 2 # New Code # SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : Any = config["lr"] SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(lowercase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase ): SCREAMING_SNAKE_CASE : Any = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = output.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) SCREAMING_SNAKE_CASE : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
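# The heart of the script above is `Accelerator.accumulate`: gradients are only
# synchronized and applied every `gradient_accumulation_steps` batches. A
# minimal, self-contained hedged sketch with toy stand-ins for the model and data:
import torch
from accelerate import Accelerator

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
data = [{"x": torch.randn(8, 4), "y": torch.randint(0, 2, (8,))} for _ in range(8)]

accelerator = Accelerator(gradient_accumulation_steps=4)
model, optimizer = accelerator.prepare(model, optimizer)

for batch in data:
    with accelerator.accumulate(model):  # gradients sync/step only every 4th batch
        logits = model(batch["x"].to(accelerator.device))
        loss = torch.nn.functional.cross_entropy(logits, batch["y"].to(accelerator.device))
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()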
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Compute ROUGE between two line-aligned text files; kwargs are passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
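# Hedged CLI usage for the Fire entry point above (file names are illustrative;
# `utils.calculate_rouge` ships alongside this script in the examples directory):
#   python rouge_cli.py predictions.txt gold.txt --save_path rouge.json
# Extra flags (e.g. --rouge_keys) are forwarded to calculate_rouge via **kwargs.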
import functools


def mincost_tickets(days, costs):
    """Minimum total cost to travel on every day in `days`, given the prices of
    1-day, 7-day and 30-day passes in `costs`."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index):
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        # Travel day: buy whichever pass (1, 7 or 30 days) minimises the total.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
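# Worked example (LeetCode 983): for travel days [1, 4, 6, 7, 8, 20] and pass
# costs [2, 7, 15], the optimum is a 1-day pass on day 1, a 7-day pass covering
# days 4-8, and a 1-day pass on day 20: 2 + 7 + 2 = 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
assert mincost_tickets([], [2, 7, 15]) == 0  # no travel days costs nothing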
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging snake_case = logging.get_logger(__name__) def lowerCamelCase__ ( lowercase ): """simple docstring""" if isinstance(lowercase , np.ndarray ): return list(tensor.shape ) SCREAMING_SNAKE_CASE : Optional[Any] = tf.shape(lowercase ) if tensor.shape == tf.TensorShape(lowercase ): return dynamic SCREAMING_SNAKE_CASE : Optional[Any] = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(lowercase )] def lowerCamelCase__ ( lowercase , lowercase = None , lowercase = None ): """simple docstring""" return tf.nn.softmax(logits=logits + 1E-9 , axis=lowercase , name=lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=1E-5 , lowercase=-1 ): """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowercase , lowercase ): raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." ) # Get mean and variance on the axis to be normalized SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = tf.nn.moments(lowercase , axes=[axis] , keepdims=lowercase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis SCREAMING_SNAKE_CASE : Optional[int] = [1] * inputs.shape.rank SCREAMING_SNAKE_CASE : int = shape_list(lowercase )[axis] SCREAMING_SNAKE_CASE : Tuple = tf.reshape(lowercase , lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = tf.reshape(lowercase , lowercase ) # Compute layer normalization using the batch_normalization # function. SCREAMING_SNAKE_CASE : List[Any] = tf.nn.batch_normalization( lowercase , lowercase , lowercase , offset=lowercase , scale=lowercase , variance_epsilon=lowercase , ) return outputs def lowerCamelCase__ ( lowercase , lowercase=0 , lowercase=-1 ): """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input SCREAMING_SNAKE_CASE : List[str] = tf.shape(lowercase ) SCREAMING_SNAKE_CASE : Any = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) SCREAMING_SNAKE_CASE : Tuple = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(lowercase , lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" if not isinstance(lowercase , tf.Tensor ): SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor(lowercase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: SCREAMING_SNAKE_CASE : Optional[int] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: SCREAMING_SNAKE_CASE : Any = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) SCREAMING_SNAKE_CASE : str = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase__ ( lowercase , lowercase , lowercase = "input_ids" ): """simple docstring""" tf.debugging.assert_less( lowercase , tf.cast(lowercase , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(lowercase )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. SCREAMING_SNAKE_CASE : Optional[int] = [x for x in data if len(lowercase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) SCREAMING_SNAKE_CASE : Dict = np.asarray(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : List[Any] = np.array_split(lowercase , lowercase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 SCREAMING_SNAKE_CASE : Dict = np.array_split(lowercase , lowercase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(lowercase ): SCREAMING_SNAKE_CASE : List[str] = chunk_data else: SCREAMING_SNAKE_CASE : List[Any] = data def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if name in group.attrs: SCREAMING_SNAKE_CASE : int = [n.decode("utf8" ) if hasattr(lowercase , "decode" ) else n for n in group.attrs[name]] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : int = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("utf8" ) if hasattr(lowercase , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase__ ( lowercase ): """simple docstring""" def _expand_single_ad_tensor(lowercase ): if isinstance(lowercase , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(lowercase , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , lowercase )
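# Hedged sketch of the helpers above as exposed in transformers.tf_utils:
# shape_list falls back to dynamic dims only where the static shape is unknown,
# and flatten mimics torch.flatten. Assumes `tensorflow` and `transformers`.
import tensorflow as tf
from transformers.tf_utils import flatten, shape_list, stable_softmax

x = tf.random.uniform((2, 3, 4))
print(shape_list(x))                     # [2, 3, 4]
print(stable_softmax(x, axis=-1).shape)  # (2, 3, 4)
print(flatten(x, start_dim=1).shape)     # (2, 12)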
def perfect_cube(n):
    """Return True if n is a perfect cube."""
    # Round the cube root before comparing: 27 ** (1 / 3) is 3.0000000000000004,
    # so comparing without rounding wrongly rejects true cubes.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length=8):
    """Generate a random password of the given length using the secrets module."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl, i):
    """Generate a password of total length i that is guaranteed to contain chars_incl.

    The remaining slots are filled with roughly equal numbers of letters, digits
    and punctuation, then the whole result is shuffled.
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl, i):
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password, min_length=8):
    """A strong password is at least 8 characters long and contains an uppercase
    letter, a lowercase letter, a digit and a special character."""
    if len(password) < min_length:
        # Your password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
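# Quick usage of the generators above (output varies per run):
pw = password_generator(12)
print(pw, is_strong_password(pw))  # strength depends on the random draw
print(alternative_password_generator("@1Ab", 12))  # always contains "@", "1", "A", "b"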
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] snake_case = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" ) return sd def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE : Optional[Any] = key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE : str = "pretraining" if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512} SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048} SCREAMING_SNAKE_CASE : Any = "vqa_advanced" elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129} SCREAMING_SNAKE_CASE : Tuple = "vqa" elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : int = { "visual_embedding_dim": 1024, "num_labels": 2, } SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr" SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase ) # Load State Dict SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": 
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") snake_case = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
def fizz_buzz(number, iterations):
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
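# Example: playing up to 15 produces the classic sequence; note the trailing
# space after every token, which is how `out` is built above.
assert fizz_buzz(1, 15) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "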
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
import os import pytest from transformers.dynamic_module_utils import get_imports snake_case = """ import os """ snake_case = """ def foo(): import os return False """ snake_case = """ def foo(): def bar(): if True: import os return False return bar() """ snake_case = """ import os try: import bar except ImportError: raise ValueError() """ snake_case = """ import os def foo(): try: import bar except ImportError: raise ValueError() """ snake_case = """ import os try: import bar except (ImportError, AttributeError): raise ValueError() """ snake_case = """ import os try: import bar except ImportError as e: raise ValueError() """ snake_case = """ import os try: import bar except: raise ValueError() """ snake_case = """ import os try: import bar import baz except ImportError: raise ValueError() """ snake_case = """ import os try: import bar import baz except ImportError: x = 1 raise ValueError() """ snake_case = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize("case" , lowercase ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(lowercase , "test_file.py" ) with open(lowercase , "w" ) as _tmp_file: _tmp_file.write(lowercase ) SCREAMING_SNAKE_CASE : int = get_imports(lowercase ) assert parsed_imports == ["os"]
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache" SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Optional[int] = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path elif issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ): """simple docstring""" assert isinstance(lowercase , lowercase ) for split in splits: SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split] 
assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "cache" SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : str = ParquetDatasetReader( {"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Tuple = "train" SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path} SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) SCREAMING_SNAKE_CASE : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} ) SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert get_writer_batch_size(lowercase ) == expected
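# Hedged round-trip sketch of the reader/writer under test (the file name is
# illustrative; run inside a scratch directory):
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(ds, "roundtrip.parquet").write() > 0
reloaded = ParquetDatasetReader("roundtrip.parquet").read()
assert reloaded.column_names == ["col_1", "col_2"]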
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = """Hello, World!""" snake_case = """en_XX""" def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = Path("data_bin" ) SCREAMING_SNAKE_CASE : int = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(lowercase ).parent ) , checkpoint_file=Path(lowercase ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(lowercase ) , bpe="sentencepiece" , sentencepiece_model=str(Path(lowercase ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , ) xmod.eval() # disable dropout print(lowercase ) SCREAMING_SNAKE_CASE : int = xmod.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE : int = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: SCREAMING_SNAKE_CASE : List[Any] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , lowercase ) SCREAMING_SNAKE_CASE : str = XmodForSequenceClassification(lowercase ) if classification_head else XmodForMaskedLM(lowercase ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE : Dict = xmod_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE : Dict = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. SCREAMING_SNAKE_CASE : List[str] = xmod_sent_encoder.layernorm_embedding.weight SCREAMING_SNAKE_CASE : Optional[Any] = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE : List[Any] = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE : Dict = xmod_sent_encoder.layers[i] # self attention SCREAMING_SNAKE_CASE : Dict = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("Dimensions of self-attention weights do not match." 
) SCREAMING_SNAKE_CASE : Optional[int] = xmod_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE : Tuple = xmod_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE : Optional[int] = xmod_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE : Optional[Any] = xmod_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE : Tuple = xmod_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE : Union[str, Any] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match." ) SCREAMING_SNAKE_CASE : Dict = xmod_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE : str = xmod_layer.self_attn.out_proj.bias SCREAMING_SNAKE_CASE : str = xmod_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE : str = xmod_layer.self_attn_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE : List[Any] = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match." ) SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_layer.fca.weight SCREAMING_SNAKE_CASE : List[Any] = xmod_layer.fca.bias # output SCREAMING_SNAKE_CASE : int = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match." ) SCREAMING_SNAKE_CASE : Dict = xmod_layer.fca.weight SCREAMING_SNAKE_CASE : Tuple = xmod_layer.fca.bias SCREAMING_SNAKE_CASE : Optional[int] = xmod_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE : Any = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_layer.adapter_layer_norm.weight SCREAMING_SNAKE_CASE : int = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("Lists of language adapters do not match." ) for lang_code, adapter in xmod_layer.adapter_modules.items(): SCREAMING_SNAKE_CASE : Tuple = bert_output.adapter_modules[lang_code] SCREAMING_SNAKE_CASE : int = xmod_layer.adapter_modules[lang_code] SCREAMING_SNAKE_CASE : List[str] = from_adapter.fca.weight SCREAMING_SNAKE_CASE : Tuple = from_adapter.fca.bias SCREAMING_SNAKE_CASE : Any = from_adapter.fca.weight SCREAMING_SNAKE_CASE : Dict = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: SCREAMING_SNAKE_CASE : List[Any] = xmod_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE : Optional[int] = xmod_sent_encoder.layer_norm.bias if classification_head: SCREAMING_SNAKE_CASE : Any = xmod.model.classification_heads["mnli"].dense.weight SCREAMING_SNAKE_CASE : List[str] = xmod.model.classification_heads["mnli"].dense.bias SCREAMING_SNAKE_CASE : Dict = xmod.model.classification_heads["mnli"].out_proj.weight SCREAMING_SNAKE_CASE : Tuple = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE : Union[str, Any] = xmod.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE : int = xmod.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE : Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE : Tuple = xmod.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE : int = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE : Dict = xmod.encode(lowercase ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(lowercase ) SCREAMING_SNAKE_CASE : List[str] = model(lowercase )[0] if classification_head: SCREAMING_SNAKE_CASE : str = xmod.model.classification_heads["mnli"](xmod.extract_features(lowercase ) ) else: SCREAMING_SNAKE_CASE : Tuple = xmod.model(lowercase , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE : Optional[Any] = torch.allclose(lowercase , lowercase , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) Path(lowercase ).mkdir(parents=lowercase , exist_ok=lowercase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) snake_case = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
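# Hedged invocation example for the conversion script above (the script file
# name and paths are illustrative):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/model.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head  # only when converting an MNLI head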
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    """Configuration class for GPT-J models."""

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
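A hedged usage sketch for the ONNX config above — the tiny hyperparameters are arbitrary, and the import path assumes the file lives at its usual location inside transformers:

from transformers import GPTJConfig
from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig

config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)  # deliberately tiny
onnx_config = GPTJOnnxConfig(config, use_past=True)

print(list(onnx_config.inputs))        # input_ids, past_key_values.*, attention_mask
print(onnx_config.num_layers)          # 2
print(onnx_config.default_onnx_opset)  # 13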
319
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubble the largest remaining element to the end
    of the list, then recurse on the shorter prefix until a pass makes no swap."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
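A quick check of the restored swap (the list is sorted in place and also returned):

print(bubble_sort([64, 34, 25, 12, 22, 11, 90]))  # [11, 12, 22, 25, 34, 64, 90]
print(bubble_sort([5]))                            # [5] — one element, no swaps
print(bubble_sort([]))                             # [] — empty input is a no-op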
319
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger snake_case = get_logger(__name__) snake_case = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ): for processor in self: SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters if len(UpperCAmelCase_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : float ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) SCREAMING_SNAKE_CASE : Optional[int] = temperature def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = scores / self.temperature return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) 
SCREAMING_SNAKE_CASE : Optional[int] = top_p SCREAMING_SNAKE_CASE : str = filter_value SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] ) SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value ) SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 ) SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p # include the token that is higher than top_p as well SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 ) score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ ) # min tokens to keep SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1] return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = filter_value def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value ) SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten() SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ ) return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = bos_token_id def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = max_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : str = 
1 - jnp.bool_(cur_len - self.max_length + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) SCREAMING_SNAKE_CASE : List[str] = min_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): # create boolean flag to decide if min length penalty should be applied SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = begin_index def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ ) def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): def _force_token(UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : List[str] = scores.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx] SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" ) SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) ) return new_scores SCREAMING_SNAKE_CASE : Any = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1 SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ): SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index else: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size if self.max_initial_timestamp_index is None: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): # suppress <|notimestamps|> which is handled by without_timestamps SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , ) return jnp.where( UpperCAmelCase_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where( UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , ) # if sum of probability over timestamps is above any other token, sample timestamp SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 ) def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) return scores
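The warpers defined above all share one contract — take `(input_ids, scores, cur_len)` and return adjusted scores — which is why the processor list can chain them. A minimal NumPy sketch of the arithmetic behind the temperature and top-k warpers (one unbatched row, not the Flax classes themselves):

import numpy as np


def temperature_warp(scores: np.ndarray, temperature: float) -> np.ndarray:
    # sharpen (<1) or flatten (>1) the distribution
    return scores / temperature


def top_k_warp(scores: np.ndarray, top_k: int, filter_value: float = -np.inf) -> np.ndarray:
    # keep the top_k largest logits, push everything else to filter_value
    kth_largest = np.sort(scores)[-top_k]
    return np.where(scores >= kth_largest, scores, filter_value)


logits = np.array([2.0, 1.0, 0.5, -1.0])
print(top_k_warp(temperature_warp(logits, 0.7), top_k=2))
# [2.857 1.429 -inf -inf] — only the two best tokens remain eligible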
319
1
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place one queen per row, backtracking on column and diagonal collisions."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, every row holds a queen and the
    # current board (possible_board) is a solution
    if row == n:
        # Convert possible_board, e.g. [1, 3, 0, 2], into printable rows:
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # Iterate over each column of the current row
    for col in range(n):
        # A queen in the same column collides vertically. For the diagonals we
        # use the two line equations learned earlier:
        #
        #    45º: row - col = b
        #   135º: row + col = b
        #
        # and check whether either value is already taken
        # (diagonal_right_collisions, diagonal_left_collisions). If any check
        # hits, there is a collision, so we continue to the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # No collision: recurse with the updated board and collision lists
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
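A worked check: for n = 4 the search finds the two classic solutions, and the first board produced matches the comment in the code above:

boards: list[list[str]] = []
depth_first_search([], [], [], boards, 4)
print(len(boards))  # 2 — the 4-queens puzzle has exactly two solutions
print(boards[0])    # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']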
319
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
319
1
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : int = '''new-model''' if is_tf_available(): class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = NewModelConfig @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Optional[int] = "bert-base-cased" SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : List[str] = "bert-base-cased" SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : List[str] ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Optional[Any] ): for model_name in 
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : int ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Tuple ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Optional[int] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : List[str] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow @require_tensorflow_probability def _A ( self : Optional[Any] ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained( UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : List[str] = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(model.num_parameters() , 1_4410 ) 
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase_ ) , 1_4410 ) def _A ( self : int ): SCREAMING_SNAKE_CASE : int = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(model.num_parameters() , 1_4410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase_ ) , 1_4410 ) def _A ( self : int ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel SCREAMING_SNAKE_CASE : Tuple = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(model.config ) SCREAMING_SNAKE_CASE : Optional[Any] = ["FunnelBaseModel"] SCREAMING_SNAKE_CASE : str = TFAutoModel.from_config(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = TFAutoModel.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Optional[int] ): try: AutoConfig.register("new-model" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(UpperCAmelCase_ ): auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ ) auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase_ ): auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE : Tuple = BertModelTester(self ).get_config() SCREAMING_SNAKE_CASE : Optional[int] = NewModelConfig(**tiny_config.to_dict() ) SCREAMING_SNAKE_CASE : Optional[Any] = auto_class.from_config(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = auto_class.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def _A ( self : Optional[int] ): with self.assertRaisesRegex( UpperCAmelCase_ , "bert-base is not a local folder and is not a valid model identifier" ): SCREAMING_SNAKE_CASE : Dict = TFAutoModel.from_pretrained("bert-base" ) def _A ( self : Dict ): with self.assertRaisesRegex( UpperCAmelCase_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModel.from_pretrained(UpperCAmelCase_ , revision="aaaaaa" ) def _A ( self : Tuple ): with self.assertRaisesRegex( UpperCAmelCase_ , "hf-internal-testing/config-no-model does not appear 
to have a file named pytorch_model.bin" , ): SCREAMING_SNAKE_CASE : Dict = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def _A ( self : Optional[int] ): with self.assertRaisesRegex(UpperCAmelCase_ , "Use `from_pt=True` to load this model" ): SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" ) def _A ( self : Tuple ): # Make sure we have cached the model. SCREAMING_SNAKE_CASE : int = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint SCREAMING_SNAKE_CASE : Any = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) with RequestCounter() as counter: SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
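The registration test above doubles as a recipe for plugging a custom architecture into the auto classes; a hedged sketch (the class bodies are illustrative stubs, while the register calls and `AutoConfig.for_model` are the standard API):

from transformers import AutoConfig, PretrainedConfig, TFAutoModel, TFPreTrainedModel


class NewModelConfig(PretrainedConfig):
    model_type = "new-model"


class TFNewModel(TFPreTrainedModel):
    config_class = NewModelConfig


AutoConfig.register("new-model", NewModelConfig)
TFAutoModel.register(NewModelConfig, TFNewModel)

config = AutoConfig.for_model("new-model")  # resolves to NewModelConfig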
319
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
319
1
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = '''Wav2Vec2FeatureExtractor''' UpperCamelCase_ : Any = '''AutoTokenizer''' def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ): super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor SCREAMING_SNAKE_CASE : Dict = False @classmethod def _A ( cls : Optional[Any] , UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ): try: return super().from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = WavaVecaCTCTokenizer.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) return cls(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ ) def __call__( self : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*UpperCAmelCase_ , **UpperCAmelCase_ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("raw_speech" ) else: SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("audio" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = kwargs.pop("sampling_rate" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("text" , UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: SCREAMING_SNAKE_CASE : int = args[0] SCREAMING_SNAKE_CASE : List[Any] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if audio is not None: SCREAMING_SNAKE_CASE : str = self.feature_extractor(UpperCAmelCase_ , *UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ ) if text is None: return inputs elif audio is None: return encodings else: SCREAMING_SNAKE_CASE : Optional[Any] = encodings["input_ids"] return inputs def _A ( self : Union[str, Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ): # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("input_features" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = kwargs.pop("labels" , UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: SCREAMING_SNAKE_CASE : Optional[int] = args[0] SCREAMING_SNAKE_CASE : int = args[1:] if input_features is not None: SCREAMING_SNAKE_CASE : List[str] = self.feature_extractor.pad(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ ) if labels is not None: SCREAMING_SNAKE_CASE : str = self.tokenizer.pad(UpperCAmelCase_ , **UpperCAmelCase_ ) if labels is None: return input_features elif input_features is None: return labels else: SCREAMING_SNAKE_CASE : str = labels["input_ids"] return input_features def _A ( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : Optional[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @contextmanager def _A ( self : Union[str, Any] ): warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : List[str] = self.tokenizer yield SCREAMING_SNAKE_CASE : Dict = self.feature_extractor SCREAMING_SNAKE_CASE : int = False
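A hedged usage sketch for the processor above — audio is routed to the feature extractor and text to the tokenizer; the checkpoint name is the standard public one and the silent audio is a stand-in for real speech:

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

audio = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16000, return_tensors="pt")
labels = processor(text="HELLO WORLD").input_ids  # routed to the tokenizer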
319
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } snake_case = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = EfficientNetConfig() SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"] SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"] SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"] SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"] SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"] SCREAMING_SNAKE_CASE : str = "huggingface/label-files" SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : str = 1000 SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : int = 
EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , ) return preprocessor def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) ) SCREAMING_SNAKE_CASE : List[str] = len(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )} SCREAMING_SNAKE_CASE : Dict = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) 
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) SCREAMING_SNAKE_CASE : int = {} for item in rename_keys: if item[0] in original_param_names: SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1] SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight" SCREAMING_SNAKE_CASE : List[str] = "classifier.bias" return key_mapping def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue SCREAMING_SNAKE_CASE : str = key_mapping[key] if "_conv" in key and "kernel" in key: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) ) else: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name]( include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , ) SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: SCREAMING_SNAKE_CASE : Tuple = param.numpy() SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() ) # Load HuggingFace model SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase ) SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval() SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase ) replace_params(lowercase , lowercase , lowercase ) # Initialize preprocessor and preprocess input image SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase ) SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy() # Original model inference SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase ) SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 ) SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") snake_case = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
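The converter is normally driven through its argparse flags; calling it programmatically follows the same positional order used at the bottom of the script (the comments name each argument, and the paths are illustrative):

convert_efficientnet_checkpoint(
    "b0",        # model_name
    "hf_model",  # pytorch_dump_folder_path
    True,        # save_model
    False,       # push_to_hub
)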
319
1
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise the error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
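The padding semantics asserted above reduce to a simple shape rule, illustrated here for two processes with plain torch (no distributed setup required):

# Process 0 holds shape (2, 10), process 1 holds shape (3, 10); after
# pad_across_processes every process holds shape (3, 10), with process 0's
# tensor zero-padded at the end (or at the front when pad_first=True).
import torch

tensor_p0 = torch.ones(2, 10)
padded_p0 = torch.cat([tensor_p0, torch.zeros(1, 10)])        # pad_first=False
padded_p0_first = torch.cat([torch.zeros(1, 10), tensor_p0])  # pad_first=True
print(padded_p0.shape)  # torch.Size([3, 10])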
319
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1, i.e. the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
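A worked check on the first small test grid: each row has a sorted negative suffix, and the three counters agree on the total of eight negatives:

sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
print(count_negatives_binary_search(sample))           # 8
print(count_negatives_brute_force(sample))             # 8
print(count_negatives_brute_force_with_break(sample))  # 8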
319
1
import argparse import os import torch from transformers.utils import WEIGHTS_NAME snake_case = ["""small""", """medium""", """large"""] snake_case = """lm_head.decoder.weight""" snake_case = """lm_head.weight""" def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase ) SCREAMING_SNAKE_CASE : Any = d.pop(lowercase ) os.makedirs(lowercase , exist_ok=lowercase ) torch.save(lowercase , os.path.join(lowercase , lowercase ) ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument("""--dialogpt_path""", default=""".""", type=str) snake_case = parser.parse_args() for MODEL in DIALOGPT_MODELS: snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""") snake_case = F"""./DialoGPT-{MODEL}""" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
319
1
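# Hedged illustration of the single transformation the DialoGPT conversion script
# above performs: popping the old lm_head key and re-inserting it under the new name.
# A plain dict stands in for the real torch state dict; the values are made up.
state_dict = {"lm_head.decoder.weight": [[0.1, 0.2]], "transformer.wte.weight": [[0.3]]}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in state_dict and "lm_head.weight" in state_dict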
import pickle import numpy as np from matplotlib import pyplot as plt class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int=0.2 , UpperCAmelCase_ : Any=0.2 ): SCREAMING_SNAKE_CASE : Optional[int] = bp_numa SCREAMING_SNAKE_CASE : Optional[int] = bp_numa SCREAMING_SNAKE_CASE : Optional[Any] = bp_numa SCREAMING_SNAKE_CASE : List[Any] = conva_get[:2] SCREAMING_SNAKE_CASE : Dict = conva_get[2] SCREAMING_SNAKE_CASE : List[str] = size_pa SCREAMING_SNAKE_CASE : List[Any] = rate_w SCREAMING_SNAKE_CASE : List[str] = rate_t SCREAMING_SNAKE_CASE : Any = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] SCREAMING_SNAKE_CASE : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) SCREAMING_SNAKE_CASE : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) SCREAMING_SNAKE_CASE : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1 SCREAMING_SNAKE_CASE : List[str] = -2 * np.random.rand(self.num_bpa ) + 1 SCREAMING_SNAKE_CASE : str = -2 * np.random.rand(self.num_bpa ) + 1 def _A ( self : int , UpperCAmelCase_ : List[str] ): # save model dict with pickle SCREAMING_SNAKE_CASE : List[Any] = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(UpperCAmelCase_ , "wb" ) as f: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) print(f'''Model saved: {save_path}''' ) @classmethod def _A ( cls : int , UpperCAmelCase_ : Union[str, Any] ): # read saved model with open(UpperCAmelCase_ , "rb" ) as f: SCREAMING_SNAKE_CASE : List[Any] = pickle.load(UpperCAmelCase_ ) # noqa: S301 SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) SCREAMING_SNAKE_CASE : Optional[int] = model_dic.get("size_pooling1" ) SCREAMING_SNAKE_CASE : List[str] = model_dic.get("num_bp1" ) SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get("num_bp2" ) SCREAMING_SNAKE_CASE : Any = model_dic.get("num_bp3" ) SCREAMING_SNAKE_CASE : Dict = model_dic.get("rate_weight" ) SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get("rate_thre" ) # create model instance SCREAMING_SNAKE_CASE : List[str] = CNN(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # modify model parameter SCREAMING_SNAKE_CASE : Dict = model_dic.get("w_conv1" ) SCREAMING_SNAKE_CASE : List[str] = model_dic.get("wkj" ) SCREAMING_SNAKE_CASE : str = model_dic.get("vji" ) SCREAMING_SNAKE_CASE : Optional[int] = model_dic.get("thre_conv1" ) SCREAMING_SNAKE_CASE : Dict = model_dic.get("thre_bp2" ) SCREAMING_SNAKE_CASE : Union[str, Any] = model_dic.get("thre_bp3" ) return conv_ins def _A ( self : Any , UpperCAmelCase_ : Any ): return 1 / (1 + np.exp(-1 * x )) def _A ( self : List[str] , UpperCAmelCase_ : int ): return round(UpperCAmelCase_ , 3 ) def _A ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ): # convolution process 
SCREAMING_SNAKE_CASE : Tuple = convs[0] SCREAMING_SNAKE_CASE : Union[str, Any] = convs[1] SCREAMING_SNAKE_CASE : List[str] = np.shape(UpperCAmelCase_ )[0] # get the data slice of original image data, data_focus SCREAMING_SNAKE_CASE : Optional[Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_ ): for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : str = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(UpperCAmelCase_ ) # calculate the feature map of every single kernel, and save it as a list of matrices SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : List[Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Dict = [] for i_focus in range(len(UpperCAmelCase_ ) ): SCREAMING_SNAKE_CASE : Union[str, Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : List[Any] = np.asmatrix(UpperCAmelCase_ ).reshape( UpperCAmelCase_ , UpperCAmelCase_ ) data_featuremap.append(UpperCAmelCase_ ) # expanding the data slice to one dimension SCREAMING_SNAKE_CASE : List[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCAmelCase_ ) return focus_list, data_featuremap def _A ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]="average_pool" ): # pooling process SCREAMING_SNAKE_CASE : List[str] = len(featuremaps[0] ) SCREAMING_SNAKE_CASE : Dict = int(size_map / size_pooling ) SCREAMING_SNAKE_CASE : int = [] for i_map in range(len(UpperCAmelCase_ ) ): SCREAMING_SNAKE_CASE : Any = featuremaps[i_map] SCREAMING_SNAKE_CASE : Dict = [] for i_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ): for j_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Any = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(UpperCAmelCase_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = np.asmatrix(UpperCAmelCase_ ).reshape(UpperCAmelCase_ , UpperCAmelCase_ ) featuremap_pooled.append(UpperCAmelCase_ ) return featuremap_pooled def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] ): # expanding three-dimensional data to a one-dimensional list SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i in range(len(UpperCAmelCase_ ) ): SCREAMING_SNAKE_CASE : str = np.shape(data[i] ) SCREAMING_SNAKE_CASE : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] ) SCREAMING_SNAKE_CASE : str = data_listed.getA().tolist()[0] data_expanded.extend(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCAmelCase_ ) return data_expanded def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] ): # expanding a matrix to a one-dimensional list SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = np.shape(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _A ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ): SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Tuple = 0 for i_map in
range(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = np.ones((size_map, size_map) ) for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ): for j in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Tuple = pd_pool[ i_pool ] SCREAMING_SNAKE_CASE : List[Any] = i_pool + 1 SCREAMING_SNAKE_CASE : Any = np.multiply( UpperCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(UpperCAmelCase_ ) return pd_all def _A ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]=bool ): # model training print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(UpperCAmelCase_ )) ) print((" - - Shape: Teach_Data ", np.shape(UpperCAmelCase_ )) ) SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Optional[int] = 1_0000 while rp < n_repeat and mse >= error_accuracy: SCREAMING_SNAKE_CASE : Optional[int] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(UpperCAmelCase_ ) ): # print('------------Learning Image: %d--------------'%p) SCREAMING_SNAKE_CASE : Dict = np.asmatrix(datas_train[p] ) SCREAMING_SNAKE_CASE : List[str] = np.asarray(datas_teach[p] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.convolute( UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE : Tuple = self.pooling(UpperCAmelCase_ , self.size_poolinga ) SCREAMING_SNAKE_CASE : str = np.shape(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self._expand(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = data_bp_input SCREAMING_SNAKE_CASE : List[Any] = np.dot(UpperCAmelCase_ , self.vji.T ) - self.thre_bpa SCREAMING_SNAKE_CASE : Dict = self.sig(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = np.dot(UpperCAmelCase_ , self.wkj.T ) - self.thre_bpa SCREAMING_SNAKE_CASE : Any = self.sig(UpperCAmelCase_ ) # --------------Model Learning ------------------------ # calculate error and gradient--------------- SCREAMING_SNAKE_CASE : Dict = np.multiply( (data_teach - bp_outa) , np.multiply(UpperCAmelCase_ , (1 - bp_outa) ) ) SCREAMING_SNAKE_CASE : Any = np.multiply( np.dot(UpperCAmelCase_ , self.wkj ) , np.multiply(UpperCAmelCase_ , (1 - bp_outa) ) ) SCREAMING_SNAKE_CASE : Optional[Any] = np.dot(UpperCAmelCase_ , self.vji ) SCREAMING_SNAKE_CASE : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) SCREAMING_SNAKE_CASE : int = pd_conva_pooled.T.getA().tolist() SCREAMING_SNAKE_CASE : int = self._calculate_gradient_from_pool( UpperCAmelCase_ , UpperCAmelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): SCREAMING_SNAKE_CASE : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) SCREAMING_SNAKE_CASE : List[Any] = self.rate_weight * np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) SCREAMING_SNAKE_CASE : int = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # fully connected layer SCREAMING_SNAKE_CASE : Union[str, Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE : Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE :
Any = self.thre_bpa - pd_k_all * self.rate_thre SCREAMING_SNAKE_CASE : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the summed error over every single image SCREAMING_SNAKE_CASE : Optional[int] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) SCREAMING_SNAKE_CASE : str = rp + 1 SCREAMING_SNAKE_CASE : Optional[int] = error_count / patterns all_mse.append(UpperCAmelCase_ ) def draw_error(): SCREAMING_SNAKE_CASE : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(UpperCAmelCase_ , "+-" ) plt.plot(UpperCAmelCase_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(UpperCAmelCase_ , alpha=0.5 ) plt.show() print("------------------Training Completed---------------------" ) print((" - - Training epoch: ", rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _A ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ): # model prediction SCREAMING_SNAKE_CASE : List[str] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(UpperCAmelCase_ )) ) for p in range(len(UpperCAmelCase_ ) ): SCREAMING_SNAKE_CASE : Optional[Any] = np.asmatrix(datas_test[p] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.convolute( UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.pooling(UpperCAmelCase_ , self.size_poolinga ) SCREAMING_SNAKE_CASE : Tuple = self._expand(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = data_bp_input SCREAMING_SNAKE_CASE : Dict = bp_outa * self.vji.T - self.thre_bpa SCREAMING_SNAKE_CASE : Any = self.sig(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = bp_outa * self.wkj.T - self.thre_bpa SCREAMING_SNAKE_CASE : Tuple = self.sig(UpperCAmelCase_ ) produce_out.extend(bp_outa.getA().tolist() ) SCREAMING_SNAKE_CASE : List[Any] = [list(map(self.do_round , UpperCAmelCase_ ) ) for each in produce_out] return np.asarray(UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : int ): # return the data of the image after the convolution process so we can check it out SCREAMING_SNAKE_CASE : str = np.asmatrix(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.convolute( UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE : Optional[Any] = self.pooling(UpperCAmelCase_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
319
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available snake_case = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
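# Hedged sketch of the average-pooling step in the CNN sample above: a 4x4 feature
# map pooled with a 2x2 window via a reshape, matching the loop-based pooling logic.
# All names here are illustrative, not taken from the sample.
import numpy as np

feature_map = np.arange(16, dtype=float).reshape(4, 4)
pooled = feature_map.reshape(2, 2, 2, 2).mean(axis=(1, 3))  # block means
assert pooled.shape == (2, 2) and pooled[0, 0] == feature_map[:2, :2].mean()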
def lowerCamelCase__ ( lowercase ): """simple docstring""" assert ( isinstance(lowercase , lowercase ) and number_of_steps > 0 ), F'''number_of_steps needs to be a positive integer, your input {number_of_steps}''' if number_of_steps == 1: return 1 SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = 1, 1 for _ in range(number_of_steps - 1 ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
319
def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def lowerCamelCase__ ( ): """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
319
1
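# Quick standalone check of the staircase recurrence in the sample above
# (ways(n) = ways(n-1) + ways(n-2), a Fibonacci-style iteration); the function
# name is new, introduced for illustration.
def climb_stairs(n):
    current, previous = 1, 1
    for _ in range(n - 1):
        current, previous = current + previous, current
    return current

assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]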
class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Tuple = src_parent SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent] SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ ) return True def _A ( self : Tuple , UpperCAmelCase_ : int ): if self.parents[disj_set] == disj_set: return disj_set SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
319
class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Tuple = src_parent SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent] SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ ) return True def _A ( self : Tuple , UpperCAmelCase_ : int ): if self.parents[disj_set] == disj_set: return disj_set SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
319
1
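# Bare-bones sketch of the union-find idea behind the disjoint-set class above;
# the sample's method names are obfuscated, so this re-states the core structure
# with fresh names (path halving in place of the recursive parent lookup).
parent = list(range(5))

def find(x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving keeps trees shallow
        x = parent[x]
    return x

def union(a, b):
    parent[find(a)] = find(b)

union(0, 1)
union(1, 2)
assert find(0) == find(2) and find(3) != find(0)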
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case = { """configuration_upernet""": ["""UperNetConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """UperNetForSemanticSegmentation""", """UperNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = '''timm_backbone''' def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ): super().__init__(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = backbone SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = features_only SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : List[Any] = out_indices if out_indices is not None else (-1,)
319
1
from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ['''torch'''] def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ['''torch'''] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = ['''torch'''] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = ['''torch'''] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ['''torch'''] def __init__( self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = ['''torch'''] def __init__( self : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ 
: str ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''torch'''] def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = ['''torch'''] def __init__( self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ['''torch'''] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ): requires_backends(cls , ["torch"] ) def lowerCamelCase__ ( *lowercase , **lowercase ): """simple docstring""" requires_backends(lowercase , ["torch"] ) def lowerCamelCase__ ( *lowercase , **lowercase ): """simple docstring""" requires_backends(lowercase , ["torch"] ) def lowerCamelCase__ ( *lowercase , **lowercase ): """simple docstring""" requires_backends(lowercase , ["torch"] ) def lowerCamelCase__ ( *lowercase , **lowercase ): """simple docstring""" requires_backends(lowercase , ["torch"] ) def lowerCamelCase__ ( *lowercase , **lowercase ): """simple docstring""" requires_backends(lowercase , ["torch"] ) def lowerCamelCase__ ( *lowercase , **lowercase ): """simple docstring""" requires_backends(lowercase , ["torch"] ) def lowerCamelCase__ ( *lowercase , **lowercase ): """simple docstring""" requires_backends(lowercase , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ['''torch'''] def __init__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , 
**UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = ['''torch'''] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = ['''torch'''] def __init__( self : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''torch'''] def __init__( self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = ['''torch'''] def __init__( self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = ['''torch'''] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ['''torch'''] def __init__( self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ['''torch'''] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ): 
requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : str = ['''torch'''] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''torch'''] def __init__( self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''torch'''] def __init__( self : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = ['''torch'''] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = ['''torch'''] def __init__( self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = ['''torch'''] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = ['''torch'''] def __init__( self : Optional[int] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ): 
requires_backends(self , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : str = ['''torch'''] def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = ['''torch'''] def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = ['''torch'''] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''torch'''] def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ 
: Any = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ['''torch'''] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : int = ['''torch'''] def __init__( self : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''torch'''] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = ['''torch'''] def __init__( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ): requires_backends(cls , 
["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = ['''torch'''] def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ['''torch'''] def __init__( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : int = ['''torch'''] def __init__( self : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = ['''torch'''] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ['''torch'''] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = ['''torch'''] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ): 
requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : str = ['''torch'''] def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = ['''torch'''] def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = ['''torch'''] def __init__( self : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(cls , ["torch"] ) class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''torch'''] def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def _A ( cls : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def _A ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple ): requires_backends(cls , ["torch"] )
319
from math import sqrt def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = 0 for i in range(1 , int(sqrt(lowercase ) + 1 ) ): if n % i == 0 and i != sqrt(lowercase ): total += i + n // i elif i == sqrt(lowercase ): total += i return total - n def lowerCamelCase__ ( lowercase = 10000 ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = sum( i for i in range(1 , lowercase ) if sum_of_divisors(sum_of_divisors(lowercase ) ) == i and sum_of_divisors(lowercase ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
319
1
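# Sanity check for the amicable-number logic above, using a simple (slower)
# proper-divisor sum; 220 and 284 form the classic amicable pair.
def proper_divisor_sum(n):
    return sum(i for i in range(1, n) if n % i == 0)

assert proper_divisor_sum(220) == 284 and proper_divisor_sum(284) == 220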
def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" print("\nThe shortest path matrix using Floyd Warshall algorithm\n" ) for i in range(lowercase ): for j in range(lowercase ): if dist[i][j] != float("inf" ): print(int(dist[i][j] ) , end="\t" ) else: print("INF" , end="\t" ) print() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = [[float("inf" ) for _ in range(lowercase )] for _ in range(lowercase )] for i in range(lowercase ): for j in range(lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(lowercase ): # looping through rows of graph array for i in range(lowercase ): # looping through columns of graph array for j in range(lowercase ): if ( dist[i][k] != float("inf" ) and dist[k][j] != float("inf" ) and dist[i][k] + dist[k][j] < dist[i][j] ): SCREAMING_SNAKE_CASE : Optional[int] = dist[i][k] + dist[k][j] _print_dist(lowercase , lowercase ) return dist, v if __name__ == "__main__": snake_case = int(input("""Enter number of vertices: """)) snake_case = int(input("""Enter number of edges: """)) snake_case = [[float("""inf""") for i in range(v)] for j in range(v)] for i in range(v): snake_case = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print("""\nEdge """, i + 1) snake_case = int(input("""Enter source:""")) snake_case = int(input("""Enter destination:""")) snake_case = float(input("""Enter weight:""")) snake_case = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertex, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
319
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) snake_case = { """configuration_encodec""": [ """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EncodecConfig""", ], """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""", """EncodecModel""", """EncodecPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
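# Tiny worked example of the Floyd-Warshall relaxation used above, on the 3-vertex
# graph from the sample's own comment block (edge 1->2 weight 2, edge 2->1 weight 1).
INF = float("inf")
dist = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
for k in range(3):
    for i in range(3):
        for j in range(3):
            if dist[i][k] + dist[k][j] < dist[i][j]:
                dist[i][j] = dist[i][k] + dist[k][j]
assert dist[1][2] == 2.0 and dist[2][1] == 1.0 and dist[0][1] == INF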
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case = False class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _A ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : str = pipe.dual_guided( prompt="first prompt" , image=UpperCAmelCase_ , text_to_image_strength=0.75 , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase_ , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = generator.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided( prompt="first prompt" , image=UpperCAmelCase_ , text_to_image_strength=0.75 , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _A ( self : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = "cyberpunk 2077" SCREAMING_SNAKE_CASE : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.dual_guided( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , text_to_image_strength=0.75 , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 SCREAMING_SNAKE_CASE : Dict = "A painting of a squirrel eating a burger " SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = pipe.text_to_image( prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 
SCREAMING_SNAKE_CASE : Dict = pipe.image_variation(UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type="numpy" ).images SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Any = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
319
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
319
1
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
319
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available snake_case = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @require_torch def _A ( self : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline( task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" ) SCREAMING_SNAKE_CASE : Dict = load_dataset("ashraq/esc50" ) SCREAMING_SNAKE_CASE : Any = dataset["train"]["audio"][-1]["array"] SCREAMING_SNAKE_CASE : Optional[int] = audio_classifier(UpperCAmelCase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , ) @unittest.skip("No models are available in TF" ) def _A ( self : int ): pass @slow @require_torch def _A ( self : str ): SCREAMING_SNAKE_CASE : str = pipeline( task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , ) # This is an audio of a dog SCREAMING_SNAKE_CASE : List[str] = load_dataset("ashraq/esc50" ) SCREAMING_SNAKE_CASE : Tuple = dataset["train"]["audio"][-1]["array"] SCREAMING_SNAKE_CASE : Dict = audio_classifier(UpperCAmelCase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) , [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ] , ) SCREAMING_SNAKE_CASE : Union[str, Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) , [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) SCREAMING_SNAKE_CASE : Union[str, Any] = audio_classifier( [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) , [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) @unittest.skip("No models are available in TF" ) def _A ( self : Union[str, Any] ): pass
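For reference, the same pipeline can be exercised outside the test harness. This is a minimal sketch, not part of the test file above; the random waveform (and its 1-second/48 kHz length) is a made-up stand-in for a real recording, reusing the tiny checkpoint from the fast test:

import numpy as np
from transformers import pipeline

classifier = pipeline(
    "zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
)
# Any 1-D float waveform works as input; here we fake one second of audio.
audio = np.random.rand(48_000).astype(np.float32)
print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"]))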
319
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case = 16 snake_case = 32 def lowerCamelCase__ ( lowercase , lowercase = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE : List[Any] = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE : str = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE : Optional[Any] = 8 else: SCREAMING_SNAKE_CASE : Union[str, Any] = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case = mocked_dataloaders # noqa: F811 def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1": SCREAMING_SNAKE_CASE : int = 2 # New Code # SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : Any = config["lr"] SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(lowercase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase ): SCREAMING_SNAKE_CASE : Any = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = output.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) SCREAMING_SNAKE_CASE : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
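The heart of the script above is the `accelerator.accumulate` context manager. Stripped of the GLUE plumbing, the pattern looks like this; a minimal self-contained sketch with a dummy model and data (names and sizes are invented, only the Accelerate calls mirror the script):

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

# Mirrors running the script with --gradient_accumulation_steps 4.
accelerator = Accelerator(gradient_accumulation_steps=4)
model = nn.Linear(8, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(64, 8), torch.randn(64, 1)), batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):
        loss = nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        # With a prepared optimizer, step()/zero_grad() only take effect on the
        # last micro-batch of each 4-batch accumulation window; earlier batches
        # just add their gradients locally.
        optimizer.step()
        optimizer.zero_grad()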
319
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) snake_case = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
import functools


def mincost_tickets(days, costs):
    """
    Given the days travelled in a year and the prices of 1-day, 7-day and
    30-day passes, return the minimum total cost using memoized recursion.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
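A quick worked call for the memoized recursion above (the day/cost values are invented for illustration, and this assumes `mincost_tickets` as defined in that module): travelling on days 1, 4, 6, 7, 8 and 20 with pass prices (2, 7, 15) is covered cheapest by a 1-day pass, one 7-day pass spanning days 4-10, and a final 1-day pass.

# 2 (day 1) + 7 (days 4-10) + 2 (day 20) = 11
print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11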
319
1
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger snake_case = """<<<<<<< This should probably be modified because it mentions: """ snake_case = """======= >>>>>>> """ snake_case = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] snake_case = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def lowerCamelCase__ ( lowercase ): """simple docstring""" return ConvertCommand(args.tfds_path , args.datasets_directory ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' @staticmethod def _A ( UpperCAmelCase_ : ArgumentParser ): SCREAMING_SNAKE_CASE : Tuple = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=UpperCAmelCase_ ) def __init__( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , *UpperCAmelCase_ : List[Any] ): SCREAMING_SNAKE_CASE : Optional[Any] = get_logger("datasets-cli/converting" ) SCREAMING_SNAKE_CASE : Any = tfds_path SCREAMING_SNAKE_CASE : Union[str, Any] = datasets_directory def _A ( self : List[Any] ): if os.path.isdir(self._tfds_path ): SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) SCREAMING_SNAKE_CASE : List[Any] = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Optional[Any] = {} if os.path.isdir(self._tfds_path ): SCREAMING_SNAKE_CASE : List[Any] = os.listdir(UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) SCREAMING_SNAKE_CASE : Dict = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) if not os.path.isfile(UpperCAmelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(UpperCAmelCase_ , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : Dict = f.readlines() SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = [] for line in lines: SCREAMING_SNAKE_CASE : Any = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: SCREAMING_SNAKE_CASE : List[str] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here SCREAMING_SNAKE_CASE : List[str] = "" continue elif "from absl import logging" in out_line: SCREAMING_SNAKE_CASE : int = "from datasets import logging\n" elif "getLogger" in out_line: SCREAMING_SNAKE_CASE : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : str = list(filter(lambda UpperCAmelCase_ : e in out_line , UpperCAmelCase_ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCAmelCase_ ) + "\n" ) out_lines.append(UpperCAmelCase_ ) out_lines.append(UpperCAmelCase_ ) continue else: for pattern, replacement in TO_CONVERT: SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: SCREAMING_SNAKE_CASE : Optional[Any] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , UpperCAmelCase_ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) SCREAMING_SNAKE_CASE : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: SCREAMING_SNAKE_CASE : List[Any] = True out_lines.append(UpperCAmelCase_ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset SCREAMING_SNAKE_CASE : str = f_name.replace(".py" , "" ) SCREAMING_SNAKE_CASE : int = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(UpperCAmelCase_ ) if needs_manual_update: with_manual_update.append(UpperCAmelCase_ ) with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f: f.writelines(UpperCAmelCase_ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: SCREAMING_SNAKE_CASE : Optional[Any] = os.path.basename(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(UpperCAmelCase_ , UpperCAmelCase_ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
319
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube, otherwise False."""
    # Round the floating-point cube root before comparing, so values such as
    # 27 (whose float cube root may come out as 2.9999...) are handled exactly.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
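If you want to avoid floating point entirely, a binary search over integers gives an exact check. This variant is an illustration, not from the source file:

def perfect_cube_binary_search(n: int) -> bool:
    """Exact perfect-cube test using only integer arithmetic."""
    if n < 0:
        n = -n  # (-k)**3 == -(k**3), so the magnitude decides
    low, high = 0, n
    while low <= high:
        mid = (low + high) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            low = mid + 1
        else:
            high = mid - 1
    return False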
319
1
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version snake_case = logging.getLogger(__name__) require_version("""pytorch_lightning>=1.0.4""") snake_case = { """base""": AutoModel, """sequence-classification""": AutoModelForSequenceClassification, """question-answering""": AutoModelForQuestionAnswering, """pretraining""": AutoModelForPreTraining, """token-classification""": AutoModelForTokenClassification, """language-modeling""": AutoModelWithLMHead, """summarization""": AutoModelForSeqaSeqLM, """translation""": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization snake_case = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } snake_case = sorted(arg_to_scheduler.keys()) snake_case = """{""" + """, """.join(arg_to_scheduler_choices) + """}""" class SCREAMING_SNAKE_CASE ( pl.LightningModule ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : argparse.Namespace , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str="base" , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Any , ): super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : List[str] = Path(self.hparams.output_dir ) SCREAMING_SNAKE_CASE : Tuple = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) else: SCREAMING_SNAKE_CASE : PretrainedConfig = config SCREAMING_SNAKE_CASE : Optional[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams , UpperCAmelCase_ , UpperCAmelCase_ ): assert hasattr(self.config , UpperCAmelCase_ ), f'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , UpperCAmelCase_ , getattr(self.hparams , UpperCAmelCase_ ) ) if tokenizer is None: SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCAmelCase_ , ) else: SCREAMING_SNAKE_CASE : PreTrainedTokenizer = tokenizer SCREAMING_SNAKE_CASE : Any 
= MODEL_MODES[mode] if model is None: SCREAMING_SNAKE_CASE : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCAmelCase_ , ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = model def _A ( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : int = self.model_type.from_pretrained(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : List[str] = arg_to_scheduler[self.hparams.lr_scheduler] SCREAMING_SNAKE_CASE : Any = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) SCREAMING_SNAKE_CASE : Tuple = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : int = self.model SCREAMING_SNAKE_CASE : Union[str, Any] = ["bias", "LayerNorm.weight"] SCREAMING_SNAKE_CASE : List[Any] = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] if self.hparams.adafactor: SCREAMING_SNAKE_CASE : Optional[Any] = Adafactor( UpperCAmelCase_ , lr=self.hparams.learning_rate , scale_parameter=UpperCAmelCase_ , relative_step=UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = AdamW( UpperCAmelCase_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) SCREAMING_SNAKE_CASE : Union[str, Any] = optimizer SCREAMING_SNAKE_CASE : Optional[Any] = self.get_lr_scheduler() return [optimizer], [scheduler] def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ): return self.validation_step(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : int , UpperCAmelCase_ : Any ): return self.validation_end(UpperCAmelCase_ ) def _A ( self : Dict ): SCREAMING_SNAKE_CASE : Optional[Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores SCREAMING_SNAKE_CASE : Tuple = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def _A ( self : Dict , UpperCAmelCase_ : str ): if stage == "test": SCREAMING_SNAKE_CASE : int = len(self.test_dataloader().dataset ) else: SCREAMING_SNAKE_CASE : Any = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = len(self.train_dataloader().dataset ) def _A ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False ): raise NotImplementedError("You must implement this for your task" ) def _A ( self : Optional[Any] ): return self.train_loader def _A ( self : Optional[Any] ): return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=UpperCAmelCase_ ) def _A ( self : Optional[int] ): return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=UpperCAmelCase_ ) def _A ( self : int , UpperCAmelCase_ : Union[str, Any] ): return os.path.join( self.hparams.data_dir , "cached_{}_{}_{}".format( UpperCAmelCase_ , list(filter(UpperCAmelCase_ , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def _A ( self : str , UpperCAmelCase_ : Dict[str, Any] ): 
SCREAMING_SNAKE_CASE : Optional[int] = self.output_dir.joinpath("best_tfmr" ) SCREAMING_SNAKE_CASE : Tuple = self.step_count self.model.save_pretrained(UpperCAmelCase_ ) self.tokenizer.save_pretrained(UpperCAmelCase_ ) @staticmethod def _A ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] ): parser.add_argument( "--model_name_or_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--config_name" , default="" , type=UpperCAmelCase_ , help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument( "--cache_dir" , default=str(Path(UpperCAmelCase_ ).parent / "test_run" / "cache" ) , type=UpperCAmelCase_ , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , ) parser.add_argument( "--encoder_layerdrop" , type=UpperCAmelCase_ , help="Encoder layer dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--decoder_layerdrop" , type=UpperCAmelCase_ , help="Decoder layer dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--dropout" , type=UpperCAmelCase_ , help="Dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--attention_dropout" , type=UpperCAmelCase_ , help="Attention dropout probability (Optional). Goes into model.config" , ) parser.add_argument("--learning_rate" , default=5E-5 , type=UpperCAmelCase_ , help="The initial learning rate for Adam." ) parser.add_argument( "--lr_scheduler" , default="linear" , choices=UpperCAmelCase_ , metavar=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Learning rate scheduler" , ) parser.add_argument("--weight_decay" , default=0.0 , type=UpperCAmelCase_ , help="Weight decay if we apply some." ) parser.add_argument("--adam_epsilon" , default=1E-8 , type=UpperCAmelCase_ , help="Epsilon for Adam optimizer." ) parser.add_argument("--warmup_steps" , default=0 , type=UpperCAmelCase_ , help="Linear warmup over warmup_steps." ) parser.add_argument("--num_workers" , default=4 , type=UpperCAmelCase_ , help="kwarg passed to DataLoader" ) parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=UpperCAmelCase_ ) parser.add_argument("--train_batch_size" , default=32 , type=UpperCAmelCase_ ) parser.add_argument("--eval_batch_size" , default=32 , type=UpperCAmelCase_ ) parser.add_argument("--adafactor" , action="store_true" ) class SCREAMING_SNAKE_CASE ( pl.Callback ): '''simple docstring''' def _A ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ): if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class SCREAMING_SNAKE_CASE ( pl.Callback ): '''simple docstring''' def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ): # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(UpperCAmelCase_ ) class SCREAMING_SNAKE_CASE ( pl.Callback ): '''simple docstring''' def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE : Optional[Any] = trainer.lr_schedulers[0]["scheduler"] SCREAMING_SNAKE_CASE : int = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(UpperCAmelCase_ ) def _A ( self : Optional[Any] , UpperCAmelCase_ : pl.Trainer , UpperCAmelCase_ : pl.LightningModule ): rank_zero_info("***** Validation results *****" ) SCREAMING_SNAKE_CASE : Optional[Any] = trainer.callback_metrics # Log results for key in sorted(UpperCAmelCase_ ): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(UpperCAmelCase_ , str(metrics[key] ) ) ) def _A ( self : Any , UpperCAmelCase_ : pl.Trainer , UpperCAmelCase_ : pl.LightningModule ): rank_zero_info("***** Test results *****" ) SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics # Log and save results to file SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(pl_module.hparams.output_dir , "test_results.txt" ) with open(UpperCAmelCase_ , "w" ) as writer: for key in sorted(UpperCAmelCase_ ): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(UpperCAmelCase_ , str(metrics[key] ) ) ) writer.write("{} = {}\n".format(UpperCAmelCase_ , str(metrics[key] ) ) ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" parser.add_argument( "--output_dir" , default=str(Path(lowercase ).parent / "test_run" / "model_checkpoints" ) , type=lowercase , help="The output directory where the model predictions and checkpoints will be written." , ) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=lowercase , default="O2" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=lowercase ) parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=lowercase , help="Max gradient norm" ) parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." ) parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." ) parser.add_argument( "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=lowercase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , ) parser.add_argument("--seed" , type=lowercase , default=42 , help="random seed for initialization" ) parser.add_argument( "--data_dir" , default=str(Path(lowercase ).parent / "test_run" / "dummy-train-data" ) , type=lowercase , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." 
, ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=None , lowercase=True , lowercase=[] , lowercase=None , lowercase=None , **lowercase , ): """simple docstring""" pl.seed_everything(args.seed ) # init model SCREAMING_SNAKE_CASE : Tuple = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowercase ) # add custom checkpoints if checkpoint_callback is None: SCREAMING_SNAKE_CASE : Union[str, Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowercase ) if logging_callback is None: SCREAMING_SNAKE_CASE : Any = LoggingCallback() SCREAMING_SNAKE_CASE : Any = {} if args.fpaa: SCREAMING_SNAKE_CASE : Any = 16 if args.gpus > 1: SCREAMING_SNAKE_CASE : Any = "auto" SCREAMING_SNAKE_CASE : str = "ddp" SCREAMING_SNAKE_CASE : Optional[Any] = args.accumulate_grad_batches SCREAMING_SNAKE_CASE : Dict = None SCREAMING_SNAKE_CASE : Tuple = "auto" SCREAMING_SNAKE_CASE : Dict = pl.Trainer.from_argparse_args( lowercase , weights_summary=lowercase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowercase , val_check_interval=1 , num_sanity_val_steps=2 , **lowercase , ) if args.do_train: trainer.fit(lowercase ) else: print("RAG modeling tests with new set functions successfuly executed!" ) return trainer
319
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] snake_case = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" ) return sd def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE : Optional[Any] = key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE : str = "pretraining" if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512} SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048} SCREAMING_SNAKE_CASE : Any = "vqa_advanced" elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129} SCREAMING_SNAKE_CASE : Tuple = "vqa" elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : int = { "visual_embedding_dim": 1024, "num_labels": 2, } SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr" SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase ) # Load State Dict SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": 
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") snake_case = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
319
1
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
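A quick shape check for the projection module above, with small made-up dimensions. This is a sketch assuming the class is importable as diffusers.pipelines.unclip.text_proj.UnCLIPTextProjModel; adjust the import to wherever the file lives:

import torch
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel

proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=64, cross_attention_dim=16
)
hidden_states, time_embeddings = proj(
    image_embeddings=torch.randn(2, 32),
    prompt_embeds=torch.randn(2, 32),
    text_encoder_hidden_states=torch.randn(2, 77, 32),
    do_classifier_free_guidance=False,
)
print(hidden_states.shape)  # (2, 81, 16): 4 extra context tokens prepended to 77
print(time_embeddings.shape)  # (2, 64)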
319
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
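Typical usage pairs a text batch with raw waveforms. A minimal sketch — the checkpoint name is the public CLAP release and the waveform is an invented stand-in (one second at the feature extractor's 48 kHz default):

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.rand(48_000).astype(np.float32)
inputs = processor(
    text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
print(inputs.keys())  # input_ids and attention_mask from the tokenizer, input_features from the feature extractor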
319
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) snake_case = { """configuration_encodec""": [ """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EncodecConfig""", ], """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""", """EncodecModel""", """EncodecPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache" SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Optional[int] = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path elif issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ): """simple docstring""" assert isinstance(lowercase , lowercase ) for split in splits: SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split] 
assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "cache" SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : str = ParquetDatasetReader( {"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Tuple = "train" SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path} SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) SCREAMING_SNAKE_CASE : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} ) SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert get_writer_batch_size(lowercase ) == expected
319
1
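The Parquet reader/writer pairing exercised by the tests above can be tried end to end. A minimal sketch, assuming a local `datasets` install; `Dataset.to_parquet` and `Dataset.from_parquet` are the public entry points that wrap `ParquetDatasetWriter` and `ParquetDatasetReader`:

import tempfile
from pathlib import Path

from datasets import Dataset

# A tiny in-memory dataset matching the schema used by the test fixtures.
dataset = Dataset.from_dict(
    {"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}
)

with tempfile.TemporaryDirectory() as tmp_dir:
    path = str(Path(tmp_dir) / "data.parquet")
    dataset.to_parquet(path)               # backed by ParquetDatasetWriter
    reloaded = Dataset.from_parquet(path)  # backed by ParquetDatasetReader

    assert reloaded.num_rows == 4
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]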
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) def lowerCamelCase__ ( lowercase , lowercase=False ): """simple docstring""" SCREAMING_SNAKE_CASE : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def lowerCamelCase__ ( lowercase , lowercase , lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE : List[str] = "" else: SCREAMING_SNAKE_CASE : Dict = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE : Dict = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE : Dict = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE : str = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = dct.pop(lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = val def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase , lowercase=True ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ViTConfig() # patch_size if model_name[-1] == "8": SCREAMING_SNAKE_CASE : List[str] = 8 # set labels if required if not base_model: SCREAMING_SNAKE_CASE : Tuple = 1000 SCREAMING_SNAKE_CASE : Any = "huggingface/label-files" SCREAMING_SNAKE_CASE : List[str] = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : int = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = idalabel SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: SCREAMING_SNAKE_CASE : List[Any] = 384 SCREAMING_SNAKE_CASE : Union[str, Any] = 1536 SCREAMING_SNAKE_CASE : int = 12 SCREAMING_SNAKE_CASE : List[str] = 6 # load original model from torch hub SCREAMING_SNAKE_CASE : Tuple = torch.hub.load("facebookresearch/dino:main" , lowercase ) original_model.eval() # load state_dict of original model, remove and rename some keys SCREAMING_SNAKE_CASE : Optional[int] = original_model.state_dict() if base_model: remove_classification_head_(lowercase ) SCREAMING_SNAKE_CASE : str = create_rename_keys(lowercase , base_model=lowercase ) for src, dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) read_in_q_k_v(lowercase , lowercase , lowercase ) # load HuggingFace model if base_model: SCREAMING_SNAKE_CASE : List[str] = ViTModel(lowercase , add_pooling_layer=lowercase ).eval() else: SCREAMING_SNAKE_CASE : List[Any] = ViTForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # Check outputs on an image, prepared by ViTImageProcessor SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" ) SCREAMING_SNAKE_CASE : List[str] = encoding["pixel_values"] SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowercase ) if base_model: SCREAMING_SNAKE_CASE : int = 
original_model(lowercase ) assert torch.allclose(lowercase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: SCREAMING_SNAKE_CASE : Tuple = original_model(lowercase ) assert logits.shape == outputs.logits.shape assert torch.allclose(lowercase , outputs.logits , atol=1E-3 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""dino_vitb16""", type=str, help="""Name of the model trained with DINO you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--base_model""", action="""store_true""", help="""Whether to only convert the base model (no projection head weights).""", ) parser.set_defaults(base_model=True) snake_case = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
319
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
1
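The `read_in_q_k_v` helper in the DINO conversion script above slices timm's fused `qkv` projection into separate query, key, and value tensors. A standalone sketch of that slicing with a hypothetical hidden size (ViT-Base uses 768), assuming the fused matrix stacks q, k, v along the first axis:

import torch

hidden_size = 8  # hypothetical toy size; ViT-Base uses 768
fused_weight = torch.randn(3 * hidden_size, hidden_size)  # timm's blocks.i.attn.qkv.weight
fused_bias = torch.randn(3 * hidden_size)

# Query, key and value occupy consecutive thirds of the fused tensor, in that order.
q_weight = fused_weight[:hidden_size, :]
k_weight = fused_weight[hidden_size : hidden_size * 2, :]
v_weight = fused_weight[-hidden_size:, :]
q_bias = fused_bias[:hidden_size]
k_bias = fused_bias[hidden_size : hidden_size * 2]
v_bias = fused_bias[-hidden_size:]

# Sanity check: the three slices reassemble into the original fused matrix.
assert torch.equal(torch.cat([q_weight, k_weight, v_weight], dim=0), fused_weight)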
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val):
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name, key_size):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)

    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main():
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
319
def bubble_sort(list_data, length=0):
    """simple docstring"""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
1
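For intuition about the key tuples the generator above writes out, here is a textbook ElGamal round trip over a toy prime; it is independent of the script's `cryptomath` and `rabin_miller` helpers and uses numbers far below the 2048-bit production setting:

import random

p = 2027  # toy prime; the script above generates a 2048-bit one
g = 3     # candidate generator mod p
d = random.randrange(3, p)  # private exponent
h = pow(g, d, p)            # public component

m = 1234                    # message, must satisfy m < p
k = random.randrange(3, p)  # fresh ephemeral key per encryption
c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p

# Decryption: m = c2 * (c1^d)^(-1) mod p
recovered = (c2 * pow(pow(c1, d, p), -1, p)) % p
assert recovered == m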
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __get__( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) SCREAMING_SNAKE_CASE : Union[str, Any] = "__cached_" + self.fget.__name__ SCREAMING_SNAKE_CASE : Any = getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if cached is None: SCREAMING_SNAKE_CASE : List[str] = self.fget(UpperCAmelCase_ ) setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return cached def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'''invalid truth value {val!r}''' ) def lowerCamelCase__ ( lowercase ): """simple docstring""" if is_torch_fx_proxy(lowercase ): return True if is_torch_available(): import torch if isinstance(lowercase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(lowercase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(lowercase , (jnp.ndarray, Tracer) ): return True return isinstance(lowercase , np.ndarray ) def lowerCamelCase__ ( lowercase ): """simple docstring""" return isinstance(lowercase , np.ndarray ) def lowerCamelCase__ ( lowercase ): """simple docstring""" return _is_numpy(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" import torch return isinstance(lowercase , torch.Tensor ) def lowerCamelCase__ ( lowercase ): """simple docstring""" return False if not is_torch_available() else _is_torch(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" import torch return isinstance(lowercase , torch.device ) def lowerCamelCase__ ( lowercase ): """simple docstring""" return False if not is_torch_available() else _is_torch_device(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" import torch if isinstance(lowercase , lowercase ): if hasattr(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = getattr(lowercase , lowercase ) else: return False return isinstance(lowercase , torch.dtype ) def lowerCamelCase__ ( lowercase ): """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" import tensorflow as tf return isinstance(lowercase , tf.Tensor ) def lowerCamelCase__ ( lowercase ): """simple docstring""" return False if not is_tf_available() else _is_tensorflow(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(lowercase , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(lowercase ) return type(lowercase ) == tf.Tensor def lowerCamelCase__ ( lowercase ): """simple docstring""" return False if not is_tf_available() 
else _is_tf_symbolic_tensor(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" import jax.numpy as jnp # noqa: F811 return isinstance(lowercase , jnp.ndarray ) def lowerCamelCase__ ( lowercase ): """simple docstring""" return False if not is_flax_available() else _is_jax(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" if isinstance(lowercase , (dict, UserDict) ): return {k: to_py_obj(lowercase ) for k, v in obj.items()} elif isinstance(lowercase , (list, tuple) ): return [to_py_obj(lowercase ) for o in obj] elif is_tf_tensor(lowercase ): return obj.numpy().tolist() elif is_torch_tensor(lowercase ): return obj.detach().cpu().tolist() elif is_jax_tensor(lowercase ): return np.asarray(lowercase ).tolist() elif isinstance(lowercase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def lowerCamelCase__ ( lowercase ): """simple docstring""" if isinstance(lowercase , (dict, UserDict) ): return {k: to_numpy(lowercase ) for k, v in obj.items()} elif isinstance(lowercase , (list, tuple) ): return np.array(lowercase ) elif is_tf_tensor(lowercase ): return obj.numpy() elif is_torch_tensor(lowercase ): return obj.detach().cpu().numpy() elif is_jax_tensor(lowercase ): return np.asarray(lowercase ) else: return obj class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Optional[int] = fields(self ) # Safety and consistency checks if not len(UpperCAmelCase_ ): raise ValueError(f'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' ) SCREAMING_SNAKE_CASE : List[str] = getattr(self , class_fields[0].name ) SCREAMING_SNAKE_CASE : Tuple = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Any = first_field.items() SCREAMING_SNAKE_CASE : List[Any] = True else: try: SCREAMING_SNAKE_CASE : str = iter(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = True except TypeError: SCREAMING_SNAKE_CASE : str = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(UpperCAmelCase_ ): if ( not isinstance(UpperCAmelCase_ , (list, tuple) ) or not len(UpperCAmelCase_ ) == 2 or not isinstance(element[0] , UpperCAmelCase_ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute SCREAMING_SNAKE_CASE : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self , element[0] , element[1] ) if element[1] is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = element[1] elif first_field is not None: SCREAMING_SNAKE_CASE : int = first_field else: for field in class_fields: SCREAMING_SNAKE_CASE : Any = getattr(self , field.name ) if v is not None: SCREAMING_SNAKE_CASE : Tuple = v def __delitem__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ): raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def _A ( self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ): raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def _A ( self : List[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ): raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def _A ( self : str , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ): raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self : Tuple , UpperCAmelCase_ : Tuple ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Any = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(UpperCAmelCase_ , UpperCAmelCase_ ) super().__setattr__(UpperCAmelCase_ , UpperCAmelCase_ ) def __setitem__( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ): # Will raise a KeyException if needed super().__setitem__(UpperCAmelCase_ , UpperCAmelCase_ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Union[str, Any] ): return tuple(self[k] for k in self.keys() ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' @classmethod def _A ( cls : List[str] , UpperCAmelCase_ : Tuple ): raise ValueError( f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : int = '''longest''' UpperCamelCase_ : List[Any] = '''max_length''' UpperCamelCase_ : Tuple = '''do_not_pad''' class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = '''pt''' UpperCamelCase_ : Optional[Any] = '''tf''' UpperCamelCase_ : Tuple = '''np''' UpperCamelCase_ : List[Any] = '''jax''' class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : List[ContextManager] ): SCREAMING_SNAKE_CASE : Optional[Any] = context_managers SCREAMING_SNAKE_CASE : List[str] = ExitStack() def __enter__( self : Union[str, Any] ): for context_manager in self.context_managers: self.stack.enter_context(UpperCAmelCase_ ) def __exit__( self : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[Any] ): self.stack.__exit__(*UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = infer_framework(lowercase ) if framework == "tf": SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": SCREAMING_SNAKE_CASE : Optional[Any] = 
inspect.signature(model_class.forward ) # PyTorch models else: SCREAMING_SNAKE_CASE : Any = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = model_class.__name__ SCREAMING_SNAKE_CASE : str = infer_framework(lowercase ) if framework == "tf": SCREAMING_SNAKE_CASE : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": SCREAMING_SNAKE_CASE : int = inspect.signature(model_class.forward ) # PyTorch models else: SCREAMING_SNAKE_CASE : Any = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def lowerCamelCase__ ( lowercase , lowercase = "" , lowercase = "." ): """simple docstring""" def _flatten_dict(lowercase , lowercase="" , lowercase="." ): for k, v in d.items(): SCREAMING_SNAKE_CASE : Any = str(lowercase ) + delimiter + str(lowercase ) if parent_key else k if v and isinstance(lowercase , lowercase ): yield from flatten_dict(lowercase , lowercase , delimiter=lowercase ).items() else: yield key, v return dict(_flatten_dict(lowercase , lowercase , lowercase ) ) @contextmanager def lowerCamelCase__ ( lowercase , lowercase = False ): """simple docstring""" if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def lowerCamelCase__ ( lowercase , lowercase=None ): """simple docstring""" if is_numpy_array(lowercase ): return np.transpose(lowercase , axes=lowercase ) elif is_torch_tensor(lowercase ): return array.T if axes is None else array.permute(*lowercase ) elif is_tf_tensor(lowercase ): import tensorflow as tf return tf.transpose(lowercase , perm=lowercase ) elif is_jax_tensor(lowercase ): return jnp.transpose(lowercase , axes=lowercase ) else: raise ValueError(F'''Type not supported for transpose: {type(lowercase )}.''' ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if is_numpy_array(lowercase ): return np.reshape(lowercase , lowercase ) elif is_torch_tensor(lowercase ): return array.reshape(*lowercase ) elif is_tf_tensor(lowercase ): import tensorflow as tf return tf.reshape(lowercase , lowercase ) elif is_jax_tensor(lowercase ): return jnp.reshape(lowercase , lowercase ) else: raise ValueError(F'''Type not supported for reshape: {type(lowercase )}.''' ) def lowerCamelCase__ ( lowercase , lowercase=None ): """simple docstring""" if is_numpy_array(lowercase ): return np.squeeze(lowercase , axis=lowercase ) elif is_torch_tensor(lowercase ): return array.squeeze() if axis is None else array.squeeze(dim=lowercase ) elif is_tf_tensor(lowercase ): import tensorflow as tf return tf.squeeze(lowercase , axis=lowercase ) elif is_jax_tensor(lowercase ): return jnp.squeeze(lowercase , axis=lowercase ) else: raise ValueError(F'''Type not supported for squeeze: {type(lowercase )}.''' ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if is_numpy_array(lowercase ): return np.expand_dims(lowercase , lowercase ) elif is_torch_tensor(lowercase ): return array.unsqueeze(dim=lowercase ) elif is_tf_tensor(lowercase ): import tensorflow as tf return tf.expand_dims(lowercase , axis=lowercase ) elif is_jax_tensor(lowercase ): return jnp.expand_dims(lowercase 
, axis=lowercase ) else: raise ValueError(F'''Type not supported for expand_dims: {type(lowercase )}.''' ) def lowerCamelCase__ ( lowercase ): """simple docstring""" if is_numpy_array(lowercase ): return np.size(lowercase ) elif is_torch_tensor(lowercase ): return array.numel() elif is_tf_tensor(lowercase ): import tensorflow as tf return tf.size(lowercase ) elif is_jax_tensor(lowercase ): return array.size else: raise ValueError(F'''Type not supported for expand_dims: {type(lowercase )}.''' ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" for key, value in auto_map.items(): if isinstance(lowercase , (tuple, list) ): SCREAMING_SNAKE_CASE : str = [F'''{repo_id}--{v}''' if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: SCREAMING_SNAKE_CASE : str = F'''{repo_id}--{value}''' return auto_map def lowerCamelCase__ ( lowercase ): """simple docstring""" for base_class in inspect.getmro(lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = base_class.__module__ SCREAMING_SNAKE_CASE : int = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F'''Could not infer framework from class {model_class}.''' )
319
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger snake_case = get_logger(__name__) snake_case = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ): for processor in self: SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters if len(UpperCAmelCase_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : float ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) SCREAMING_SNAKE_CASE : Optional[int] = temperature def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = scores / self.temperature return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) 
SCREAMING_SNAKE_CASE : Optional[int] = top_p SCREAMING_SNAKE_CASE : str = filter_value SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] ) SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value ) SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 ) SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p # include the token that is higher than top_p as well SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 ) score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ ) # min tokens to keep SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1] return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = filter_value def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value ) SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten() SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ ) return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = bos_token_id def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = max_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : str = 
1 - jnp.bool_(cur_len - self.max_length + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) SCREAMING_SNAKE_CASE : List[str] = min_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): # create boolean flag to decide if min length penalty should be applied SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = begin_index def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ ) def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): def _force_token(UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : List[str] = scores.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx] SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" ) SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) ) return new_scores SCREAMING_SNAKE_CASE : Any = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1 SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ): SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index else: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size if self.max_initial_timestamp_index is None: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): # suppress <|notimestamps|> which is handled by without_timestamps SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , ) return jnp.where( UpperCAmelCase_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where( UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , ) # if sum of probability over timestamps is above any other token, sample timestamp SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 ) def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) return scores
319
1
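The top-k warper above is easiest to verify in isolation: keep the k largest logits per row and push everything else to the filter value. A minimal sketch, assuming JAX is installed:

import jax.numpy as jnp
from jax import lax

scores = jnp.array([[1.0, 4.0, 2.0, 3.0]])
top_k, filter_value = 2, -float("inf")

# lax.top_k returns the k largest values and their indices for each row.
topk_scores, topk_indices = lax.top_k(scores, top_k)

# Start from a fully masked array and scatter the survivors back in.
next_scores = jnp.full_like(scores, filter_value)
next_scores = next_scores.at[0, topk_indices[0]].set(topk_scores[0])

print(next_scores)  # [[-inf  4. -inf  3.]]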
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """simple docstring"""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    """simple docstring"""
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path, new_path):
    """simple docstring"""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir="."):
    """simple docstring"""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
319
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys

snake_case = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
319
1
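The markdown layout produced by the directory-listing script above hinges on `md_prefix`: depth 0 opens a new `##` section while deeper levels emit indented bullets. A quick self-contained check of the expected prefixes (the two-space indent is taken from the upstream script):

def md_prefix(i):
    # Depth 0 opens a new "##" section; deeper levels become indented bullets.
    return f"{i * '  '}*" if i else "\n##"


assert md_prefix(0) == "\n##"
assert md_prefix(1) == "  *"

# A file one level deep therefore renders as an indented markdown link:
print(f"{md_prefix(1)} [Bubble Sort](sorts/bubble_sort.py)")  # "  * [Bubble Sort](sorts/bubble_sort.py)"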
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _A ( self : List[Any] , UpperCAmelCase_ : Dict ): SCREAMING_SNAKE_CASE : Optional[int] = 3 SCREAMING_SNAKE_CASE : Optional[int] = 250 SCREAMING_SNAKE_CASE : int = ids_tensor((batch_size, length) , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = torch.ones((batch_size, length) , device=UpperCAmelCase_ , dtype=torch.float ) / length return input_ids, scores def _A ( self : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_tensors(5 ) SCREAMING_SNAKE_CASE : str = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) def _A ( self : int ): SCREAMING_SNAKE_CASE : Tuple = MaxLengthCriteria(max_length=10 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._get_tensors(10 ) self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : int = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self._get_tensors(5 ) self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self._get_tensors(10 ) self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : List[Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def _A ( self : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._get_tensors(5 ) SCREAMING_SNAKE_CASE : List[str] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) ) def _A ( self : Union[str, Any] ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(UpperCAmelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) SCREAMING_SNAKE_CASE : Dict = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(UpperCAmelCase_ ) , 1 )
319
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
319
1
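Outside the unit tests above, the same criteria gate a generation loop. A minimal sketch, assuming a `transformers` version in which calling a `StoppingCriteriaList` returns a plain bool, as the tests expect; the length and time criteria ignore the `scores` argument:

import torch
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList(
    [MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1)]
)

short_ids = torch.ones((3, 5), dtype=torch.long)   # 5 generated tokens: keep going
long_ids = torch.ones((3, 10), dtype=torch.long)   # 10 tokens: max_length reached

print(criteria(short_ids, None))  # False (unless max_time has already elapsed)
print(criteria(long_ids, None))   # True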
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } snake_case = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = EfficientNetConfig() SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"] SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"] SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"] SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"] SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"] SCREAMING_SNAKE_CASE : str = "huggingface/label-files" SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : str = 1000 SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : int = 
EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , ) return preprocessor def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) ) SCREAMING_SNAKE_CASE : List[str] = len(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )} SCREAMING_SNAKE_CASE : Dict = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) 
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) SCREAMING_SNAKE_CASE : int = {} for item in rename_keys: if item[0] in original_param_names: SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1] SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight" SCREAMING_SNAKE_CASE : List[str] = "classifier.bias" return key_mapping def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue SCREAMING_SNAKE_CASE : str = key_mapping[key] if "_conv" in key and "kernel" in key: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) ) else: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name]( include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , ) SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: SCREAMING_SNAKE_CASE : Tuple = param.numpy() SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() ) # Load HuggingFace model SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase ) SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval() SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase ) replace_params(lowercase , lowercase , lowercase ) # Initialize preprocessor and preprocess input image SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase ) SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy() # Original model inference SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase ) SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 ) SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save the model locally""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") snake_case = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
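# Note on the kernel permutes in the weight-copy function above (de-obfuscated name
# replace_params): TF stores conv kernels as (H, W, in, out) and depthwise kernels as
# (H, W, channels, multiplier), so .permute(3, 2, 0, 1) and .permute(2, 3, 0, 1)
# reorder them to PyTorch's channels-first layout, while dense kernels only need a
# plain transpose.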
319
1
def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def lowerCamelCase__ ( ): """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
319
def lowerCamelCase__ ( ): """simple docstring""" return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] snake_case = generate_large_matrix() snake_case = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def lowerCamelCase__ ( lowercase ): """simple docstring""" assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid ) assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2 SCREAMING_SNAKE_CASE : Optional[int] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: SCREAMING_SNAKE_CASE : List[Any] = mid + 1 else: SCREAMING_SNAKE_CASE : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : List[str] = len(grid[0] ) for i in range(len(lowercase ) ): SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] ) total += bound return (len(lowercase ) * len(grid[0] )) - total def lowerCamelCase__ ( lowercase ): """simple docstring""" return len([number for row in grid for number in row if number < 0] ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = 0 for row in grid: for i, number in enumerate(lowercase ): if number < 0: total += len(lowercase ) - i break return total def lowerCamelCase__ ( ): """simple docstring""" from timeit import timeit print("Running benchmarks" ) SCREAMING_SNAKE_CASE : List[str] = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
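# Worked check (de-obfuscated name count_negatives_binary_search): on the test grid
# [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]] the per-row negative
# counts are 1, 1, 2 and 4, so every counting function above should return 8.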
319
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case = { """configuration_blenderbot""": [ """BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotConfig""", """BlenderbotOnnxConfig""", ], """tokenization_blenderbot""": ["""BlenderbotTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""BlenderbotTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotForCausalLM""", """BlenderbotForConditionalGeneration""", """BlenderbotModel""", """BlenderbotPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """TFBlenderbotForConditionalGeneration""", """TFBlenderbotModel""", """TFBlenderbotPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FlaxBlenderbotForConditionalGeneration""", """FlaxBlenderbotModel""", """FlaxBlenderbotPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
import argparse import os import torch from transformers.utils import WEIGHTS_NAME snake_case = ["""small""", """medium""", """large"""] snake_case = """lm_head.decoder.weight""" snake_case = """lm_head.weight""" def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase ) SCREAMING_SNAKE_CASE : Any = d.pop(lowercase ) os.makedirs(lowercase , exist_ok=lowercase ) torch.save(lowercase , os.path.join(lowercase , lowercase ) ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument("""--dialogpt_path""", default=""".""", type=str) snake_case = parser.parse_args() for MODEL in DIALOGPT_MODELS: snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""") snake_case = F"""./DialoGPT-{MODEL}""" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
319
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = '''ClapFeatureExtractor''' UpperCamelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ): super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("sampling_rate" , UpperCAmelCase_ ) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none." ) if text is not None: SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if audios is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor( UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None and audios is not None: SCREAMING_SNAKE_CASE : Optional[Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ ) def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def _A ( self : str ): SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
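# A minimal usage sketch of the processor above, written against the public transformers
# API (this record is an obfuscated copy of ClapProcessor); the checkpoint name and the
# random waveform are illustrative assumptions, not part of this record.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000).astype("float32")  # one second of dummy audio at 48 kHz
inputs = processor(text=["a dog barking"], audios=[audio], sampling_rate=48_000, return_tensors="pt")
print(inputs.keys())  # input_ids / attention_mask from the tokenizer plus input_features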
319
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available snake_case = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = 8 # DPR tok SCREAMING_SNAKE_CASE : Union[str, Any] = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok SCREAMING_SNAKE_CASE : str = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] SCREAMING_SNAKE_CASE : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) SCREAMING_SNAKE_CASE : List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] SCREAMING_SNAKE_CASE : Dict = {"unk_token": "<unk>"} SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE : str = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase_ ) ) def _A ( self : str ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def _A ( self : Optional[int] ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def _A ( self : List[Any] ): shutil.rmtree(self.tmpdirname ) @require_tokenizers def _A ( self : Dict ): SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , "rag_tokenizer" ) SCREAMING_SNAKE_CASE : str = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) SCREAMING_SNAKE_CASE : Optional[int] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(UpperCAmelCase_ ) rag_tokenizer.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = 
RagTokenizer.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ ) self.assertIsInstance(new_rag_tokenizer.question_encoder , UpperCAmelCase_ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , UpperCAmelCase_ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def _A ( self : str ): SCREAMING_SNAKE_CASE : Union[str, Any] = RagTokenizer.from_pretrained("facebook/rag-token-nq" ) SCREAMING_SNAKE_CASE : Union[str, Any] = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) @slow def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Optional[Any] = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" ) SCREAMING_SNAKE_CASE : List[str] = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
319
def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def lowerCamelCase__ ( ): """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
319
1
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=lowerCAmelCase ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCamelCase_ : ClassVar[Features] = Features({'''text''': Value('''string''' )} ) UpperCamelCase_ : ClassVar[Features] = Features({'''labels''': ClassLabel} ) UpperCamelCase_ : str = "text" UpperCamelCase_ : str = "labels" def _A ( self : Any , UpperCAmelCase_ : Dict ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , UpperCAmelCase_ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self ) SCREAMING_SNAKE_CASE : List[Any] = self.label_schema.copy() SCREAMING_SNAKE_CASE : Union[str, Any] = features[self.label_column] SCREAMING_SNAKE_CASE : str = label_schema return task_template @property def _A ( self : List[Any] ): return { self.text_column: "text", self.label_column: "labels", }
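# Minimal sketch of how this template is used, via the public datasets.tasks API (the
# class above is an obfuscated copy of datasets.tasks.TextClassification); the column
# names and labels below are illustrative.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(features)  # copies the dataset's ClassLabel into the label schema
print(aligned.label_schema)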
319
class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Tuple = src_parent SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent] SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ ) return True def _A ( self : Tuple , UpperCAmelCase_ : int ): if self.parents[disj_set] == disj_set: return disj_set SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
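# Worked trace (de-obfuscated names merge/get_parent; union by rank, with path-compressed
# lookups in the original source): starting from set_counts [1, 1, 1], merging sets 0 and 1
# yields one set of size 2 (max_set becomes 2), and merging the result with set 2 yields a
# single set of size 3 (max_set becomes 3).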
319
1
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False ): SCREAMING_SNAKE_CASE : Any = scheduler SCREAMING_SNAKE_CASE : Union[str, Any] = optimizers if isinstance(UpperCAmelCase_ , (list, tuple) ) else [optimizers] SCREAMING_SNAKE_CASE : List[Any] = split_batches SCREAMING_SNAKE_CASE : str = step_with_optimizer SCREAMING_SNAKE_CASE : str = GradientState() def _A ( self : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ): if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*UpperCAmelCase_ , **UpperCAmelCase_ ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*UpperCAmelCase_ , **UpperCAmelCase_ ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step SCREAMING_SNAKE_CASE : List[str] = AcceleratorState().num_processes for _ in range(UpperCAmelCase_ ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , "total_steps" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*UpperCAmelCase_ , **UpperCAmelCase_ ) else: self.scheduler.step(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : Dict ): return self.scheduler.get_last_lr() def _A ( self : Optional[int] ): return self.scheduler.state_dict() def _A ( self : Any , UpperCAmelCase_ : int ): self.scheduler.load_state_dict(UpperCAmelCase_ ) def _A ( self : int ): return self.scheduler.get_lr() def _A ( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ): return self.scheduler.print_lr(*UpperCAmelCase_ , **UpperCAmelCase_ )
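# Minimal sketch of how this wrapper is normally obtained in user code, assuming the
# public accelerate API (users rarely construct it directly); the model, optimizer and
# scheduler below are illustrative.
import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# scheduler is now wrapped as above: scheduler.step() only advances the underlying
# schedule on iterations where gradients were actually synced and the optimizer stepped.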
319
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = '''timm_backbone''' def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ): super().__init__(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = backbone SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = features_only SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : List[Any] = out_indices if out_indices is not None else (-1,)
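# Minimal sketch against the public transformers API (this record is an obfuscated copy
# of TimmBackboneConfig); the backbone name is an illustrative assumption.
from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", out_indices=(-1,))
print(config.backbone, config.out_indices)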
319
1
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""): raise Exception("""requires fairseq >= 1.0.0a""") logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = """Hello world! cécé herlolip""" def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = FairseqRobertaModel.from_pretrained(lowercase ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE : Any = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE : Optional[int] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: SCREAMING_SNAKE_CASE : List[Any] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our RoBERTa config:" , lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = XLMRobertaXLForSequenceClassification(lowercase ) if classification_head else XLMRobertaXLForMaskedLM(lowercase ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE : Optional[Any] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE : int = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE : List[str] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE : str = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE : Any = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE : str = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE : Dict = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE : Optional[int] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE : List[str] = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE : Any = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE : List[Any] = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE : Union[str, Any] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE : Optional[Any] = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE : Optional[Any] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE : List[str] = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE : Optional[int] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE : Dict = roberta_layer.fca.weight SCREAMING_SNAKE_CASE : Dict = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE : List[str] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE : int = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.weight SCREAMING_SNAKE_CASE : int = roberta.model.classification_heads["mnli"].dense.bias SCREAMING_SNAKE_CASE : List[Any] = roberta.model.classification_heads["mnli"].out_proj.weight SCREAMING_SNAKE_CASE : List[Any] = roberta.model.classification_heads["mnli"].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE : Tuple = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE : Dict = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE : Tuple = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE : List[str] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE : Tuple = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE : torch.Tensor = roberta.encode(lowercase ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE : List[Any] = model(lowercase )[0] if classification_head: SCREAMING_SNAKE_CASE : Any = roberta.model.classification_heads["mnli"](roberta.extract_features(lowercase ) ) else: SCREAMING_SNAKE_CASE : Optional[int] = roberta.model(lowercase )[0] print(our_output.shape , their_output.shape ) SCREAMING_SNAKE_CASE : Tuple = torch.max(torch.abs(our_output - their_output ) ).item() print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE : Tuple = torch.allclose(lowercase , lowercase , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) pathlib.Path(lowercase ).mkdir(parents=lowercase , exist_ok=lowercase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) snake_case = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
319
from math import sqrt def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = 0 for i in range(1 , int(sqrt(lowercase ) + 1 ) ): if n % i == 0 and i != sqrt(lowercase ): total += i + n // i elif i == sqrt(lowercase ): total += i return total - n def lowerCamelCase__ ( lowercase = 10000 ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = sum( i for i in range(1 , lowercase ) if sum_of_divisors(sum_of_divisors(lowercase ) ) == i and sum_of_divisors(lowercase ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
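# Worked check (Project Euler 21, de-obfuscated name sum_of_divisors): the proper divisors
# of 220 sum to 284 and those of 284 sum to 220, so both members of this amicable pair are
# counted; the well-known answer for solution(10000) is 31626.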
319
1
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = coefficient_matrix.shape SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = constant_matrix.shape if rowsa != colsa: SCREAMING_SNAKE_CASE : List[Any] = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(lowercase ) if colsa != 1: SCREAMING_SNAKE_CASE : Optional[Any] = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(lowercase ) if rowsa != rowsa: SCREAMING_SNAKE_CASE : int = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " F'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(lowercase ) if len(lowercase ) != rowsa: SCREAMING_SNAKE_CASE : Optional[Any] = ( "Number of initial values must be equal to number of rows in coefficient " F'''matrix but received {len(lowercase )} and {rowsa}''' ) raise ValueError(lowercase ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) SCREAMING_SNAKE_CASE : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = table.shape strictly_diagonally_dominant(lowercase ) # Iterates the whole matrix for given number of times for _ in range(lowercase ): SCREAMING_SNAKE_CASE : Dict = [] for row in range(lowercase ): SCREAMING_SNAKE_CASE : List[str] = 0 for col in range(lowercase ): if col == row: SCREAMING_SNAKE_CASE : Dict = table[row][col] elif col == cols - 1: SCREAMING_SNAKE_CASE : Tuple = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] SCREAMING_SNAKE_CASE : List[Any] = (temp + val) / denom new_val.append(lowercase ) SCREAMING_SNAKE_CASE : List[Any] = new_val return [float(lowercase ) for i in new_val] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = table.shape SCREAMING_SNAKE_CASE : str = True for i in range(0 , lowercase ): SCREAMING_SNAKE_CASE : int = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
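# Worked example (de-obfuscated name jacobi_iteration_method): for the strictly diagonally
# dominant system A = [[4, 1], [1, 3]], b = [1, 2] with initial guess [0, 0], the iterates
# converge toward the exact solution x = [1/11, 7/11] ≈ [0.0909, 0.6364].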
319
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) snake_case = { """configuration_encodec""": [ """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EncodecConfig""", ], """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""", """EncodecModel""", """EncodecPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = tmp_path / "file.csv" SCREAMING_SNAKE_CASE : Optional[Any] = textwrap.dedent( "\\n header1,header2\n 1,2\n 10,20\n " ) with open(lowercase , "w" ) as f: f.write(lowercase ) return str(lowercase ) @pytest.fixture def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "malformed_file.csv" SCREAMING_SNAKE_CASE : Dict = textwrap.dedent( "\\n header1,header2\n 1,2\n 10,20,\n " ) with open(lowercase , "w" ) as f: f.write(lowercase ) return str(lowercase ) @pytest.fixture def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "csv_with_image.csv" SCREAMING_SNAKE_CASE : Tuple = textwrap.dedent( F'''\ image {image_file} ''' ) with open(lowercase , "w" ) as f: f.write(lowercase ) return str(lowercase ) @pytest.fixture def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "csv_with_label.csv" SCREAMING_SNAKE_CASE : Dict = textwrap.dedent( "\\n label\n good\n bad\n good\n " ) with open(lowercase , "w" ) as f: f.write(lowercase ) return str(lowercase ) @pytest.fixture def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "csv_with_int_list.csv" SCREAMING_SNAKE_CASE : Optional[int] = textwrap.dedent( "\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " ) with open(lowercase , "w" ) as f: f.write(lowercase ) return str(lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = Csv() SCREAMING_SNAKE_CASE : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(lowercase , match="Error tokenizing data" ): for _ in generator: pass assert any( record.levelname == "ERROR" and "Failed to read file" in record.message and os.path.basename(lowercase ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase__ ( lowercase ): """simple docstring""" with open(lowercase , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : int = f.read().splitlines()[1] SCREAMING_SNAKE_CASE : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) ) SCREAMING_SNAKE_CASE : int = csv._generate_tables([[csv_file_with_image]] ) SCREAMING_SNAKE_CASE : List[str] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("image" ).type == Image()() SCREAMING_SNAKE_CASE : Dict = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase__ ( lowercase ): """simple docstring""" with open(lowercase , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : Dict = f.read().splitlines()[1:] SCREAMING_SNAKE_CASE : Union[str, Any] = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) ) SCREAMING_SNAKE_CASE : Dict = csv._generate_tables([[csv_file_with_label]] ) SCREAMING_SNAKE_CASE : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )() SCREAMING_SNAKE_CASE : Tuple = pa_table.to_pydict()["label"] assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(lowercase ) for label in labels] def lowerCamelCase__ ( 
lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda lowercase : [int(i) for i in lowercase.split()]} ) SCREAMING_SNAKE_CASE : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] ) SCREAMING_SNAKE_CASE : Optional[Any] = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("int_list" ).type ) SCREAMING_SNAKE_CASE : Union[str, Any] = pa_table.to_pydict()["int_list"] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
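# e.g. the converter above turns the CSV cell "1 2 3" into [1, 2, 3], which is exactly
# what the final assertion checks for all three rows of the fixture.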
319
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
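# Minimal usage sketch against the public transformers API (this record is an obfuscated
# copy of PegasusTokenizerFast); the checkpoint is the one referenced in the maps above.
from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tok("PEGASUS was pretrained with gap-sentence generation.").input_ids
assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens only appends </s>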
319
1
import math import unittest def lowerCamelCase__ ( lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) and ( number >= 0 ), "'number' must be an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes greater than 3 are of the form 6k +/- 1 (any other residue mod 6 is divisible by 2 or 3) for i in range(5 , int(math.sqrt(lowercase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _A ( self : List[str] ): self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def _A ( self : int ): with self.assertRaises(UpperCAmelCase_ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , ) self.assertFalse( is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
319
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available snake_case = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\") >>> pipe.to(\"cuda\") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save(\"cat.png\") ``` """ def lowerCamelCase__ ( lowercase , lowercase , lowercase=8 ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 SCREAMING_SNAKE_CASE : List[Any] = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : MultilingualCLIP , UpperCAmelCase_ : XLMRobertaTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , UpperCAmelCase_ : VQModel , ): super().__init__() self.register_modules( text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , movq=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _A ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ): if latents is None: SCREAMING_SNAKE_CASE : str = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Tuple = latents.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = latents * scheduler.init_noise_sigma return latents def _A ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=None , ): SCREAMING_SNAKE_CASE : Union[str, Any] = len(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else 1 # get prompt text embeddings SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer( UpperCAmelCase_ , padding="max_length" , truncation=UpperCAmelCase_ , max_length=77 , return_attention_mask=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors="pt" , ) SCREAMING_SNAKE_CASE : Optional[int] = text_inputs.input_ids SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , padding="longest" , return_tensors="pt" ).input_ids if 
untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Dict = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = text_input_ids.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = text_inputs.attention_mask.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.text_encoder( input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = prompt_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[int] = text_encoder_hidden_states.repeat_interleave(UpperCAmelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : int = text_mask.repeat_interleave(UpperCAmelCase_ , dim=0 ) if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : List[str] if negative_prompt is None: SCREAMING_SNAKE_CASE : str = [""] * batch_size elif type(UpperCAmelCase_ ) is not type(UpperCAmelCase_ ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase_ )} !=''' f''' {type(UpperCAmelCase_ )}.''' ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : int = [negative_prompt] elif batch_size != len(UpperCAmelCase_ ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase_ )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`." ) else: SCREAMING_SNAKE_CASE : int = negative_prompt SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer( UpperCAmelCase_ , padding="max_length" , max_length=77 , truncation=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors="pt" , ) SCREAMING_SNAKE_CASE : List[Any] = uncond_input.input_ids.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = uncond_input.attention_mask.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.text_encoder( input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method SCREAMING_SNAKE_CASE : Tuple = negative_prompt_embeds.shape[1] SCREAMING_SNAKE_CASE : Union[str, Any] = negative_prompt_embeds.repeat(1 , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = uncond_text_encoder_hidden_states.shape[1] SCREAMING_SNAKE_CASE : Tuple = uncond_text_encoder_hidden_states.repeat(1 , UpperCAmelCase_ , 1 ) SCREAMING_SNAKE_CASE : int = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , UpperCAmelCase_ , -1 ) SCREAMING_SNAKE_CASE : str = uncond_text_mask.repeat_interleave(UpperCAmelCase_ , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes SCREAMING_SNAKE_CASE : str = torch.cat([negative_prompt_embeds, prompt_embeds] ) SCREAMING_SNAKE_CASE : List[Any] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def _A ( self : List[str] , UpperCAmelCase_ : int=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) SCREAMING_SNAKE_CASE : str = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : List[str] = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Tuple , UpperCAmelCase_ : Optional[int]=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) SCREAMING_SNAKE_CASE : Tuple = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=UpperCAmelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE : Optional[Any] = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = cpu_offload_with_hook(UpperCAmelCase_ , UpperCAmelCase_ , prev_module_hook=UpperCAmelCase_ ) if self.safety_checker is not None: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = cpu_offload_with_hook(self.safety_checker , UpperCAmelCase_ , prev_module_hook=UpperCAmelCase_ ) # We'll offload the last model manually. 
SCREAMING_SNAKE_CASE : int = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _A ( self : str ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCAmelCase_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : float = 4.0 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Any = 1 elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = len(UpperCAmelCase_ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : str = self._execution_device SCREAMING_SNAKE_CASE : Tuple = batch_size * num_images_per_prompt SCREAMING_SNAKE_CASE : Dict = guidance_scale > 1.0 SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self._encode_prompt( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Dict = torch.cat(UpperCAmelCase_ , dim=0 ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[str] = torch.cat(UpperCAmelCase_ , dim=0 ) if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : Tuple = image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : int = negative_image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=UpperCAmelCase_ ) self.scheduler.set_timesteps(UpperCAmelCase_ , device=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = self.unet.config.in_channels SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = get_new_h_w(UpperCAmelCase_ , UpperCAmelCase_ , self.movq_scale_factor ) # create initial latent SCREAMING_SNAKE_CASE : List[str] = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} SCREAMING_SNAKE_CASE : List[str] = self.unet( sample=UpperCAmelCase_ , timestep=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , added_cond_kwargs=UpperCAmelCase_ , 
return_dict=UpperCAmelCase_ , )[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = noise_pred.split(latents.shape[1] , dim=1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : Dict = self.scheduler.step( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ , ).prev_sample # post-processing SCREAMING_SNAKE_CASE : Any = self.movq.decode(UpperCAmelCase_ , force_not_quantize=UpperCAmelCase_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE : str = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : Tuple = image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(UpperCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase_ )
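# A minimal sketch of the classifier-free guidance update used in the denoising
# loop above, isolated for clarity. It assumes `noise_pred` stacks the
# unconditional and text-conditioned predictions along the batch dimension, in
# that order, exactly as the torch.cat([negative_prompt_embeds, prompt_embeds])
# batching above arranges them; the function name is illustrative, not part of
# the pipeline.
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # split the doubled batch back into its unconditional / conditional halves
    uncond, text = noise_pred.chunk(2)
    # push the prediction away from the unconditional branch and toward the
    # text-conditioned one; guidance_scale > 1 amplifies the text signal
    return uncond + guidance_scale * (text - uncond)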
319
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUs (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case = 16 snake_case = 32 def lowerCamelCase__ ( lowercase , lowercase = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE : List[Any] = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE : str = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE : Optional[Any] = 8 else: SCREAMING_SNAKE_CASE : Union[str, Any] = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case = mocked_dataloaders # noqa: F811 def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1": SCREAMING_SNAKE_CASE : int = 2 # New Code # SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : Any = config["lr"] SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(lowercase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase ): SCREAMING_SNAKE_CASE : Any = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = output.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) SCREAMING_SNAKE_CASE : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose " "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. " "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be run before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
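# What `accelerator.accumulate(model)` does for the training loop above,
# restated as a hand-written sketch. It assumes plain PyTorch and the
# model/optimizer/scheduler/dataloader objects the script builds; Accelerate
# additionally skips gradient synchronization across processes on the
# non-stepping iterations, which this sketch omits.
for step, batch in enumerate(train_dataloader):
    # scale the loss so the accumulated gradients average over the mini-batches
    loss = model(**batch).loss / gradient_accumulation_steps
    loss.backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()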
319
1
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin snake_case = """ Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] """ class SCREAMING_SNAKE_CASE ( unittest.TestCase , lowerCAmelCase ): '''simple docstring''' def _A ( self : Dict ): SCREAMING_SNAKE_CASE : Optional[int] = load_tool("text-question-answering" ) self.tool.setup() SCREAMING_SNAKE_CASE : Dict = load_tool("text-question-answering" , remote=UpperCAmelCase_ ) def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : List[str] = self.tool(UpperCAmelCase_ , "What did Hugging Face do in April 2021?" ) self.assertEqual(UpperCAmelCase_ , "launched the BigScience Research Workshop" ) def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : List[Any] = self.remote_tool(UpperCAmelCase_ , "What did Hugging Face do in April 2021?" ) self.assertEqual(UpperCAmelCase_ , "launched the BigScience Research Workshop" ) def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : Dict = self.tool(text=UpperCAmelCase_ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(UpperCAmelCase_ , "launched the BigScience Research Workshop" ) def _A ( self : Dict ): SCREAMING_SNAKE_CASE : List[Any] = self.remote_tool(text=UpperCAmelCase_ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(UpperCAmelCase_ , "launched the BigScience Research Workshop" )
319
import functools def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if not isinstance(lowercase , lowercase ) or not all(isinstance(lowercase , lowercase ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(lowercase ) != 3 or not all(isinstance(lowercase , lowercase ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(lowercase ) == 0: return 0 if min(lowercase ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(lowercase ) >= 366: raise ValueError("All days elements should be less than 366" ) SCREAMING_SNAKE_CASE : Dict = set(lowercase ) @functools.cache def dynamic_programming(lowercase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
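# A self-contained restatement of the day-pass recurrence above (the sample is
# stored with obfuscated names, so the names here are illustrative), plus a
# worked example: with travel days {1, 4, 6, 7, 8, 20} and pass costs 2 / 7 / 25
# the optimum is 11, e.g. a 1-day ticket on day 1, a 7-day pass covering days
# 4-8, and a 1-day ticket on day 20.
import functools

def min_ticket_cost(days: list[int], costs: list[int]) -> int:
    days_set = set(days)

    @functools.cache
    def dp(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dp(index + 1)  # no travel today: advance for free
        return min(
            costs[0] + dp(index + 1),   # 1-day ticket
            costs[1] + dp(index + 7),   # 7-day pass
            costs[2] + dp(index + 30),  # 30-day pass
        )

    return dp(1)

assert min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 25]) == 11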
319
1
def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = 0 # if input_string is "aba" then new_input_string becomes "a|b|a" SCREAMING_SNAKE_CASE : str = "" SCREAMING_SNAKE_CASE : List[str] = "" # append each character + "|" to new_string for range(0, length-1) for i in input_string[: len(lowercase ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the start and end of the previous furthest-ending palindromic # substring SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, 0 # length[i] shows the length of palindromic substring with center i SCREAMING_SNAKE_CASE : Union[str, Any] = [1 for i in range(len(lowercase ) )] # for each character in new_string find the corresponding palindromic string SCREAMING_SNAKE_CASE : int = 0 for j in range(len(lowercase ) ): SCREAMING_SNAKE_CASE : List[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(lowercase ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 SCREAMING_SNAKE_CASE : List[Any] = 2 * k - 1 # does this palindrome end after the previously explored end (that is, r)? # if yes, update r to the last index of this palindrome if j + k - 1 > r: SCREAMING_SNAKE_CASE : Optional[Any] = j - k + 1 # noqa: E741 SCREAMING_SNAKE_CASE : Optional[Any] = j + k - 1 # update max_length and start position if max_length < length[j]: SCREAMING_SNAKE_CASE : Dict = length[j] SCREAMING_SNAKE_CASE : str = j # create that string SCREAMING_SNAKE_CASE : int = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
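# A tiny O(n^3) brute-force cross-check for the Manacher routine above, useful
# for validating it on short strings (the helper name is illustrative): the
# longest palindromic substring of "abbbaba" is "abbba".
def longest_palindrome_bruteforce(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best

assert longest_palindrome_bruteforce("abbbaba") == "abbba"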
319
def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = round(n ** (1 / 3) ) # round the float cube root: 27 ** (1 / 3) is 3.0000000000000004, so a raw float comparison wrongly rejects perfect cubes return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
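# A float-free alternative for very large inputs, sketched with integer binary
# search (assumption: n is a non-negative int; this helper is not part of the
# original sample):
def perfect_cube_int(n: int) -> bool:
    lo, hi = 0, max(n, 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid ** 3
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False

assert perfect_cube_int(27) and not perfect_cube_int(4)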
319
1
from itertools import permutations def lowerCamelCase__ ( lowercase ): """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False SCREAMING_SNAKE_CASE : Any = [7, 11, 13, 17] for i, test in enumerate(lowercase ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowerCamelCase__ ( lowercase = 10 ): """simple docstring""" return sum( int("".join(map(lowercase , lowercase ) ) ) for num in permutations(range(lowercase ) ) if is_substring_divisible(lowercase ) ) if __name__ == "__main__": print(F"""{solution() = }""")
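# Worked check of the rules above on 1406357289, the pandigital cited in the
# problem statement: every 3-digit window passes its divisibility test.
digits = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9)
assert digits[3] % 2 == 0  # d2d3d4 = 406 ends in an even digit, so divisible by 2
assert (digits[2] + digits[3] + digits[4]) % 3 == 0  # d3d4d5 = 063 has digit sum 9, divisible by 3
assert digits[5] % 5 == 0  # d4d5d6 = 635 ends in 5, divisible by 5
for i, p in enumerate((7, 11, 13, 17)):  # d5d6d7=357, d6d7d8=572, d7d8d9=728, d8d9d10=289
    assert (digits[i + 4] * 100 + digits[i + 5] * 10 + digits[i + 6]) % p == 0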
319
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] snake_case = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" ) return sd def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE : Optional[Any] = key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE : str = "pretraining" if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512} SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048} SCREAMING_SNAKE_CASE : Any = "vqa_advanced" elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129} SCREAMING_SNAKE_CASE : Tuple = "vqa" elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : int = { "visual_embedding_dim": 1024, "num_labels": 2, } SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr" SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase ) # Load State Dict SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": 
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") snake_case = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
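# Standalone illustration of the prefix renaming that get_new_dict applies
# above (string rewriting only, no tensors involved; the key is illustrative):
rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
key = "bert.bert.encoder.layer.0.attention.self.query.weight"
for old, new in rename_pairs:
    key = key.replace(old, new)
assert key == "visual_bert.encoder.layer.0.attention.self.query.weight"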
319
1
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json snake_case = """sshleifer/mar_enro_6_3_student""" class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def _A ( self : Dict ): super().setUp() SCREAMING_SNAKE_CASE : Any = cached_path( "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[int] = f'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k''' @slow @require_torch_gpu def _A ( self : int ): MarianMTModel.from_pretrained(UpperCAmelCase_ ) @slow @require_torch_gpu def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : Optional[Any] = { "$MAX_LEN": 64, "$BS": 64, "$GAS": 1, "$ENRO_DIR": self.data_dir, "facebook/mbart-large-cc25": MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", "--learning_rate=3e-5": "--learning_rate 3e-4", "--num_train_epochs 6": "--num_train_epochs 1", } # Clean up bash script SCREAMING_SNAKE_CASE : Optional[Any] = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip() SCREAMING_SNAKE_CASE : List[str] = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" ) for k, v in env_vars_to_replace.items(): SCREAMING_SNAKE_CASE : int = bash_script.replace(UpperCAmelCase_ , str(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") SCREAMING_SNAKE_CASE : Union[str, Any] = f''' --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 '''.split() # XXX: args.gpus > 1 : handle multi_gpu in the future SCREAMING_SNAKE_CASE : Dict = ["finetune.py"] + bash_script.split() + args with patch.object(UpperCAmelCase_ , "argv" , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() SCREAMING_SNAKE_CASE : List[str] = pl.Trainer.add_argparse_args(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = SummarizationModule.add_model_specific_args(UpperCAmelCase_ , os.getcwd() ) SCREAMING_SNAKE_CASE : int = parser.parse_args() SCREAMING_SNAKE_CASE : List[Any] = main(UpperCAmelCase_ ) # Check metrics SCREAMING_SNAKE_CASE : List[str] = load_json(model.metrics_save_path ) SCREAMING_SNAKE_CASE : str = metrics["val"][0] SCREAMING_SNAKE_CASE : Union[str, Any] = metrics["val"][-1] self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] , UpperCAmelCase_ ) self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats["val_avg_bleu"] , 17 ) # 3. 
test BLEU and val BLEU within ~1.1 pt. self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict SCREAMING_SNAKE_CASE : Tuple = os.listdir(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = [x for x in contents if x.endswith(".ckpt" )][0] SCREAMING_SNAKE_CASE : int = os.path.join(args.output_dir , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = torch.load(UpperCAmelCase_ , map_location="cpu" ) SCREAMING_SNAKE_CASE : Union[str, Any] = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: SCREAMING_SNAKE_CASE : Union[str, Any] = {os.path.basename(UpperCAmelCase_ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["test"] ) == 1 class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Optional[int] = f'''{self.test_file_dir_str}/test_data/wmt_en_ro''' SCREAMING_SNAKE_CASE : str = { "--fp16_opt_level=O1": "", "$MAX_LEN": 128, "$BS": 16, "$GAS": 1, "$ENRO_DIR": data_dir, "$m": "sshleifer/student_marian_en_ro_6_1", "val_check_interval=0.25": "val_check_interval=1.0", } # Clean up bash script SCREAMING_SNAKE_CASE : Any = ( (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip() ) SCREAMING_SNAKE_CASE : Any = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" ) SCREAMING_SNAKE_CASE : Dict = bash_script.replace("--fp16 " , " " ) for k, v in env_vars_to_replace.items(): SCREAMING_SNAKE_CASE : Union[str, Any] = bash_script.replace(UpperCAmelCase_ , str(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE : str = bash_script.replace("--fp16" , "" ) SCREAMING_SNAKE_CASE : Optional[int] = 6 SCREAMING_SNAKE_CASE : str = ( ["distillation.py"] + bash_script.split() + [ f'''--output_dir={output_dir}''', "--gpus=1", "--learning_rate=1e-3", f'''--num_train_epochs={epochs}''', "--warmup_steps=10", "--val_check_interval=1.0", "--do_predict", ] ) with patch.object(UpperCAmelCase_ , "argv" , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() SCREAMING_SNAKE_CASE : Optional[Any] = pl.Trainer.add_argparse_args(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = SummarizationDistiller.add_model_specific_args(UpperCAmelCase_ , os.getcwd() ) SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu SCREAMING_SNAKE_CASE : List[Any] = distill_main(UpperCAmelCase_ ) # Check metrics SCREAMING_SNAKE_CASE : Optional[Any] = load_json(model.metrics_save_path ) SCREAMING_SNAKE_CASE : str = metrics["val"][0] SCREAMING_SNAKE_CASE : Dict = metrics["val"][-1] assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] , UpperCAmelCase_ ) # check lightning ckpt can be loaded and has a reasonable statedict SCREAMING_SNAKE_CASE : int = os.listdir(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = [x for x in contents if x.endswith(".ckpt" )][0] SCREAMING_SNAKE_CASE : Any = os.path.join(args.output_dir , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch.load(UpperCAmelCase_ , map_location="cpu" ) SCREAMING_SNAKE_CASE : Tuple = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: SCREAMING_SNAKE_CASE : Any = {os.path.basename(UpperCAmelCase_ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["test"] ) == 1
319
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = '''ClapFeatureExtractor''' UpperCamelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ): super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("sampling_rate" , UpperCAmelCase_ ) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none." ) if text is not None: SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if audios is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor( UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None and audios is not None: SCREAMING_SNAKE_CASE : Optional[Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ ) def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def _A ( self : str ): SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
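# A hedged usage sketch for the processor class above. The checkpoint name is
# an assumption (any CLAP checkpoint shipping this processor should behave the
# same), and the audio is one second of silence purely for illustration.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.zeros(48_000, dtype=np.float32)
inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
# `inputs` now holds the tokenizer fields plus `input_features` from the
# feature extractor, matching the merge performed in __call__ above.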
319
1
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Tuple = RoCBertTokenizer UpperCamelCase_ : Dict = None UpperCamelCase_ : Optional[int] = False UpperCamelCase_ : List[Any] = True UpperCamelCase_ : List[Any] = filter_non_english def _A ( self : Tuple ): super().setUp() SCREAMING_SNAKE_CASE : List[str] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Optional[int] = {} for i, value in enumerate(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = i SCREAMING_SNAKE_CASE : List[Any] = i SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(UpperCAmelCase_ , UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(UpperCAmelCase_ , UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) def _A ( self : str ): SCREAMING_SNAKE_CASE : str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) SCREAMING_SNAKE_CASE : int = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(UpperCAmelCase_ , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase_ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase_ ) , [5, 6, 2, 5, 7, 8] ) def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : Dict = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : Any = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _A ( self : int ): SCREAMING_SNAKE_CASE : int = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : int = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Optional[int] = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _A ( self : str ): SCREAMING_SNAKE_CASE : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Tuple = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : str = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _A ( self : Dict ): SCREAMING_SNAKE_CASE : Tuple = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] SCREAMING_SNAKE_CASE : Union[str, Any] = {} for i, token in enumerate(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Dict = i SCREAMING_SNAKE_CASE : Any = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _A ( self : int ): self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _A ( self : Tuple ): self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _A ( self : int ): self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _A ( self : int ): SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: SCREAMING_SNAKE_CASE : List[Any] = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def _A ( self : int ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode_plus( UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase_ , "do_lower_case" ) else False SCREAMING_SNAKE_CASE : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _A ( self : str ): SCREAMING_SNAKE_CASE : List[str] = ["的", "人", "有"] SCREAMING_SNAKE_CASE : Optional[int] = "".join(UpperCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = 
tokenizer_r.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_p.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". SCREAMING_SNAKE_CASE : Optional[int] = [ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCAmelCase_ ) ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Dict ): SCREAMING_SNAKE_CASE : int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE : List[str] = "你好,你是谁" SCREAMING_SNAKE_CASE : int = tokenizer.tokenize(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.prepare_for_model( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
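# A hedged sketch of the three-vocabulary setup the tests above exercise: this
# tokenizer pairs every token with a shape id and a pronunciation id. The file
# names are illustrative placeholders for real vocabulary files.
from transformers import RoCBertTokenizer

tok = RoCBertTokenizer("vocab.txt", "word_shape.json", "word_pronunciation.json")
tokens = tok.tokenize("你好")
ids = tok.convert_tokens_to_ids(tokens)
shape_ids = tok.convert_tokens_to_shape_ids(tokens)
pron_ids = tok.convert_tokens_to_pronunciation_ids(tokens)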
319
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache" SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Optional[int] = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path elif issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ): """simple docstring""" assert isinstance(lowercase , lowercase ) for split in splits: SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split] 
assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "cache" SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : str = ParquetDatasetReader( {"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Tuple = "train" SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path} SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) SCREAMING_SNAKE_CASE : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} ) SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert get_writer_batch_size(lowercase ) == expected
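# A minimal round trip with the reader/writer the tests above exercise,
# assuming a local `datasets` installation (the file name is illustrative):
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
assert ParquetDatasetWriter(ds, "example.parquet").write() > 0  # the tests above only assert a positive count
reloaded = ParquetDatasetReader("example.parquet").read()       # a Dataset with the same rows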
319
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) def lowerCamelCase__ ( lowercase , lowercase=False ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" SCREAMING_SNAKE_CASE : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def lowerCamelCase__ ( lowercase , lowercase , lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE : Dict = "" else: SCREAMING_SNAKE_CASE : Tuple = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE : int = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE : Dict = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE : Dict = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = dct.pop(lowercase ) SCREAMING_SNAKE_CASE : List[Any] = val def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = DeiTConfig() # all deit models have fine-tuned heads SCREAMING_SNAKE_CASE : List[str] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE : Tuple = 1000 SCREAMING_SNAKE_CASE : Dict = "huggingface/label-files" SCREAMING_SNAKE_CASE : Tuple = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = idalabel SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = int(deit_name[-6:-4] ) SCREAMING_SNAKE_CASE : int = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): SCREAMING_SNAKE_CASE : List[Any] = 192 SCREAMING_SNAKE_CASE : Union[str, Any] = 768 SCREAMING_SNAKE_CASE : Optional[int] = 12 SCREAMING_SNAKE_CASE : Tuple = 3 elif deit_name[9:].startswith("small" ): SCREAMING_SNAKE_CASE : str = 384 SCREAMING_SNAKE_CASE : Optional[Any] = 1536 SCREAMING_SNAKE_CASE : List[str] = 12 SCREAMING_SNAKE_CASE : int = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): SCREAMING_SNAKE_CASE : int = 1024 SCREAMING_SNAKE_CASE : Any = 4096 SCREAMING_SNAKE_CASE : int = 24 SCREAMING_SNAKE_CASE : Union[str, Any] = 16 # load original model from timm SCREAMING_SNAKE_CASE : Tuple = timm.create_model(lowercase , pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys SCREAMING_SNAKE_CASE : int = timm_model.state_dict() SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowercase , lowercase ) for src, dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) read_in_q_k_v(lowercase , lowercase , lowercase ) # load HuggingFace model SCREAMING_SNAKE_CASE : Union[str, Any] = DeiTForImageClassificationWithTeacher(lowercase ).eval() model.load_state_dict(lowercase ) # Check outputs on an image, prepared by DeiTImageProcessor SCREAMING_SNAKE_CASE : Union[str, Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 SCREAMING_SNAKE_CASE : Dict = DeiTImageProcessor(size=lowercase , crop_size=config.image_size ) SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" ) SCREAMING_SNAKE_CASE : int = encoding["pixel_values"] SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase ) SCREAMING_SNAKE_CASE : Dict = timm_model(lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase , outputs.logits , atol=1E-3 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
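# The read_in_q_k_v step above slices timm's fused attention projection into
# separate query/key/value tensors. A standalone sketch of the same slicing
# (sizes and names are illustrative):
import torch

hidden_size = 192  # e.g. DeiT-tiny
qkv_weight = torch.randn(3 * hidden_size, hidden_size)
q = qkv_weight[:hidden_size, :]                    # first third -> query
k = qkv_weight[hidden_size : 2 * hidden_size, :]   # middle third -> key
v = qkv_weight[-hidden_size:, :]                   # last third -> value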
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
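# A minimal sketch of what the lazy structure above provides (assumes a recent
# transformers with torch installed): the heavy modeling module is only imported
# when one of its exported names is first touched.
from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig()       # cheap: only the configuration module loads
model = FocalNetModel(config)   # first touch of a modeling class triggers the lazy import
print(type(model).__name__)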
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case = logging.getLogger(__name__) @dataclass(frozen=lowerCAmelCase ) class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : str UpperCamelCase_ : str UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None @dataclass(frozen=lowerCAmelCase ) class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : List[int] UpperCamelCase_ : Optional[List[int]] = None UpperCamelCase_ : Optional[List[int]] = None UpperCamelCase_ : Optional[Union[int, float]] = None UpperCamelCase_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[InputFeatures] def __init__( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : bool = False , ): SCREAMING_SNAKE_CASE : Tuple = hans_processors[task]() SCREAMING_SNAKE_CASE : Dict = os.path.join( UpperCAmelCase_ , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(UpperCAmelCase_ ) , UpperCAmelCase_ , ) , ) SCREAMING_SNAKE_CASE : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[2], label_list[1] SCREAMING_SNAKE_CASE : List[Any] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
SCREAMING_SNAKE_CASE : Tuple = cached_features_file + ".lock" with FileLock(UpperCAmelCase_ ): if os.path.exists(UpperCAmelCase_ ) and not overwrite_cache: logger.info(f'''Loading features from cached file {cached_features_file}''' ) SCREAMING_SNAKE_CASE : int = torch.load(UpperCAmelCase_ ) else: logger.info(f'''Creating features from dataset file at {data_dir}''' ) SCREAMING_SNAKE_CASE : List[Any] = ( processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ ) ) logger.info("Training examples: %s" , len(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Any = hans_convert_examples_to_features(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) logger.info("Saving features into cached file %s" , UpperCAmelCase_ ) torch.save(self.features , UpperCAmelCase_ ) def __len__( self : Optional[Any] ): return len(self.features ) def __getitem__( self : List[Any] , UpperCAmelCase_ : str ): return self.features[i] def _A ( self : List[str] ): return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : List[InputFeatures] def __init__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = 128 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : bool = False , ): SCREAMING_SNAKE_CASE : Union[str, Any] = hans_processors[task]() SCREAMING_SNAKE_CASE : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = label_list[2], label_list[1] SCREAMING_SNAKE_CASE : Dict = label_list SCREAMING_SNAKE_CASE : Any = processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = hans_convert_examples_to_features(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 1_0000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(UpperCAmelCase_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) SCREAMING_SNAKE_CASE : Optional[int] = tf.data.Dataset.from_generator( UpperCAmelCase_ , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _A ( self : Tuple ): return self.dataset def __len__( self : Optional[int] ): return len(self.features ) def __getitem__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] ): return self.features[i] def _A ( self : List[str] ): return self.label_list class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def _A ( self : Any , UpperCAmelCase_ : Any ): return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ , "heuristics_train_set.txt" ) ) , "train" ) def _A ( self : str , UpperCAmelCase_ : Optional[int] ): return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ , "heuristics_evaluation_set.txt" ) ) , 
"dev" ) def _A ( self : Any ): return ["contradiction", "entailment", "neutral"] def _A ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Tuple = [] for i, line in enumerate(UpperCAmelCase_ ): if i == 0: continue SCREAMING_SNAKE_CASE : Optional[int] = "%s-%s" % (set_type, line[0]) SCREAMING_SNAKE_CASE : Optional[int] = line[5] SCREAMING_SNAKE_CASE : List[str] = line[6] SCREAMING_SNAKE_CASE : Optional[int] = line[7][2:] if line[7].startswith("ex" ) else line[7] SCREAMING_SNAKE_CASE : int = line[0] examples.append(InputExample(guid=UpperCAmelCase_ , text_a=UpperCAmelCase_ , text_b=UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) ) return examples def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , ): """simple docstring""" SCREAMING_SNAKE_CASE : str = {label: i for i, label in enumerate(lowercase )} SCREAMING_SNAKE_CASE : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowercase ) , desc="convert examples to features" ): if ex_index % 10000 == 0: logger.info("Writing example %d" % (ex_index) ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( example.text_a , example.text_b , add_special_tokens=lowercase , max_length=lowercase , padding="max_length" , truncation=lowercase , return_overflowing_tokens=lowercase , ) SCREAMING_SNAKE_CASE : Tuple = label_map[example.label] if example.label in label_map else 0 SCREAMING_SNAKE_CASE : Optional[int] = int(example.pairID ) features.append(InputFeatures(**lowercase , label=lowercase , pairID=lowercase ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(F'''guid: {example}''' ) logger.info(F'''features: {features[i]}''' ) return features snake_case = { """hans""": 3, } snake_case = { """hans""": HansProcessor, }
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass floats the largest remaining element to the
    end, so the next recursive call only needs to look at the first `length - 1`
    items; recursion stops early once a pass makes no swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
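# Quick sanity check for the recursive bubble sort above (illustrative only):
if __name__ == "__main__":
    sample = [5, 2, 9, 1, 5, 6]
    assert bubble_sort(list(sample)) == sorted(sample)
    print(bubble_sort(sample))  # [1, 2, 5, 5, 6, 9]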
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of `array` (with repetition) that sum
    to `target`, via plain recursion (exponential time)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized top-down: O(n * target) time, O(target) space."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]

        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, iterative bottom-up over sub-targets 1..target."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
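# The three implementations above agree on every input; the bottom-up version runs
# in O(n * target) time and O(target) space, while the plain recursion is
# exponential. A small cross-check (illustrative):
if __name__ == "__main__":
    for item_count, items, goal in [(3, [1, 2, 5], 5), (2, [2, 3], 7)]:
        plain = combination_sum_iv(item_count, items, goal)
        memoized = combination_sum_iv_dp_array(item_count, items, goal)
        bottom_up = combination_sum_iv_bottom_up(item_count, items, goal)
        assert plain == memoized == bottom_up
        print(items, goal, "->", plain)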
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger snake_case = get_logger(__name__) snake_case = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ): for processor in self: SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters if len(UpperCAmelCase_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : float ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) SCREAMING_SNAKE_CASE : Optional[int] = temperature def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = scores / self.temperature return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) 
SCREAMING_SNAKE_CASE : Optional[int] = top_p SCREAMING_SNAKE_CASE : str = filter_value SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] ) SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value ) SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 ) SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p # include the token that is higher than top_p as well SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 ) score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ ) # min tokens to keep SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1] return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = filter_value def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value ) SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten() SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ ) return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = bos_token_id def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = max_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : str = 
1 - jnp.bool_(cur_len - self.max_length + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) SCREAMING_SNAKE_CASE : List[str] = min_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): # create boolean flag to decide if min length penalty should be applied SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = begin_index def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ ) def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): def _force_token(UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : List[str] = scores.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx] SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" ) SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) ) return new_scores SCREAMING_SNAKE_CASE : Any = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1 SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ): SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index else: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size if self.max_initial_timestamp_index is None: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): # suppress <|notimestamps|> which is handled by without_timestamps SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , ) return jnp.where( UpperCAmelCase_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where( UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , ) # if sum of probability over timestamps is above any other token, sample timestamp SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 ) def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) return scores
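# Illustrative composition of the warpers defined above, under the names they carry
# in released transformers (FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper,
# FlaxTopKLogitsWarper); a sketch, not part of the module:
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
scores = jnp.array([[1.0, 2.0, 3.0, 4.0]])  # (batch_size, vocab_size)
warpers = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
)
print(warpers(input_ids, scores, cur_len=1))  # everything outside the top-2 -> -inf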
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process snake_case = logging.getLogger(__name__) snake_case = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase )} , ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase_ : bool = field( default=lowerCAmelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase_ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : bool = field( default=lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def _A ( self : Optional[Any] ): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCamelCase_ : Optional[str] = field(default=lowerCAmelCase , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''An optional input train ref data file for whole 
word masking in Chinese.'''} , ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , ) UpperCamelCase_ : bool = field( default=lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCamelCase_ : Optional[int] = field( default=5 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) UpperCamelCase_ : Optional[int] = field( default=lowerCAmelCase , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated. Default to the max input length of the model.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=lowerCAmelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase_ : float = field( default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) UpperCamelCase_ : bool = field( default=lowerCAmelCase , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) def _A ( self : int ): if self.train_file is not None: SCREAMING_SNAKE_CASE : Dict = self.train_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: SCREAMING_SNAKE_CASE : Tuple = self.validation_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" with open(lowercase , "r" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : Optional[int] = [json.loads(lowercase ) for line in f.read().splitlines() if (len(lowercase ) > 0 and not line.isspace())] assert len(lowercase ) == len(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = {c: dataset[c] for c in dataset.column_names} SCREAMING_SNAKE_CASE : Dict = refs return Dataset.from_dict(lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = parser.parse_args_into_dataclasses() # Detecting last checkpoint. SCREAMING_SNAKE_CASE : Optional[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , lowercase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , ) SCREAMING_SNAKE_CASE : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , ) else: SCREAMING_SNAKE_CASE : Optional[Any] = {} if data_args.train_file is not None: SCREAMING_SNAKE_CASE : Tuple = data_args.train_file if data_args.validation_file is not None: SCREAMING_SNAKE_CASE : Optional[int] = data_args.validation_file SCREAMING_SNAKE_CASE : Optional[Any] = data_args.train_file.split("." )[-1] if extension == "txt": SCREAMING_SNAKE_CASE : List[str] = "text" SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(lowercase , data_files=lowercase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE : List[Any] = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowercase ) elif model_args.model_name_or_path: SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase ) else: SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) SCREAMING_SNAKE_CASE : Any = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase ) elif model_args.model_name_or_path: SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForMaskedLM.from_config(lowercase ) model.resize_token_embeddings(len(lowercase ) ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: SCREAMING_SNAKE_CASE : str = datasets["train"].column_names else: SCREAMING_SNAKE_CASE : List[str] = datasets["validation"].column_names SCREAMING_SNAKE_CASE : List[str] = "text" if "text" in column_names else column_names[0] SCREAMING_SNAKE_CASE : List[Any] = "max_length" if data_args.pad_to_max_length else False def tokenize_function(lowercase ): # Remove empty lines SCREAMING_SNAKE_CASE : Any = [line for line in examples["text"] if len(lowercase ) > 0 and not line.isspace()] return tokenizer(examples["text"] , padding=lowercase , truncation=lowercase , max_length=data_args.max_seq_length ) SCREAMING_SNAKE_CASE : Tuple = datasets.map( lowercase , batched=lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: SCREAMING_SNAKE_CASE : Optional[Any] = add_chinese_references( tokenized_datasets["validation"] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer SCREAMING_SNAKE_CASE : Tuple = data_args.train_ref_file or data_args.validation_ref_file if has_ref: SCREAMING_SNAKE_CASE : List[str] = False # Data collator # This one will take care of randomly masking the tokens. SCREAMING_SNAKE_CASE : List[Any] = DataCollatorForWholeWordMask(tokenizer=lowercase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer SCREAMING_SNAKE_CASE : List[Any] = Trainer( model=lowercase , args=lowercase , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , ) # Training if training_args.do_train: if last_checkpoint is not None: SCREAMING_SNAKE_CASE : Tuple = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): SCREAMING_SNAKE_CASE : Dict = model_args.model_name_or_path else: SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Dict = trainer.train(resume_from_checkpoint=lowercase ) trainer.save_model() # Saves the tokenizer too for easy upload SCREAMING_SNAKE_CASE : Dict = os.path.join(training_args.output_dir , "train_results.txt" ) if trainer.is_world_process_zero(): with open(lowercase , "w" ) as writer: logger.info("***** Train results *****" ) for key, value in sorted(train_result.metrics.items() ): logger.info(F''' {key} = {value}''' ) writer.write(F'''{key} = {value}\n''' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # Evaluation SCREAMING_SNAKE_CASE : Optional[int] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) SCREAMING_SNAKE_CASE : str = trainer.evaluate() SCREAMING_SNAKE_CASE : Dict = math.exp(eval_output["eval_loss"] ) SCREAMING_SNAKE_CASE : Optional[Any] = perplexity SCREAMING_SNAKE_CASE : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" ) if trainer.is_world_process_zero(): with open(lowercase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in sorted(results.items() ): logger.info(F''' {key} = {value}''' ) writer.write(F'''{key} = {value}\n''' ) return 
results def lowerCamelCase__ ( lowercase ): """simple docstring""" main() if __name__ == "__main__": main()
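# A typical invocation of the training script above (paths hypothetical; the flag
# names come from the dataclasses and TrainingArguments it parses):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file path/to/train.txt \
#       --train_ref_file path/to/train_ref.txt \
#       --validation_file path/to/dev.txt \
#       --output_dir ./out \
#       --do_train --do_eval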
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow banner output if TF gets imported

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Any = (KDPMaDiscreteScheduler,) UpperCamelCase_ : Any = 1_0 def _A ( self : List[str] , **UpperCAmelCase_ : List[Any] ): SCREAMING_SNAKE_CASE : Any = { "num_train_timesteps": 1100, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**UpperCAmelCase_ ) return config def _A ( self : List[str] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase_ ) def _A ( self : Optional[Any] ): for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_ ) def _A ( self : Tuple ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCAmelCase_ ) def _A ( self : str ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase_ ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(prediction_type="v_prediction" ) SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**UpperCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : Tuple = sample.to(UpperCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = model(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = output.prev_sample SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(UpperCAmelCase_ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2 assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0_002 ) < 1E-3 def _A ( self : Any ): if torch_device == "mps": return SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**UpperCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : int = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : List[Any] = sample.to(UpperCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Tuple = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(UpperCAmelCase_ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert 
abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 def _A ( self : Dict ): if torch_device == "mps": return SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**UpperCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter.to(UpperCAmelCase_ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = model(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(UpperCAmelCase_ ) ) if str(UpperCAmelCase_ ).startswith("cpu" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3
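# Sketch of exercising the scheduler outside the test harness. The import name in
# this file appears mangled; released diffusers exposes it as KDPM2DiscreteScheduler
# (an assumption of this sketch), driven here with random tensors instead of a UNet:
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    model_output = torch.randn_like(scaled)  # stand-in for a UNet epsilon prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])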
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated."
    " Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
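# For reference, the replacement import the warning above points to (for use in
# downstream code, not inside this shim):
#
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput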
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
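# Illustrative composition using the classes exported above (checkpoint names are
# examples; from_encoder_decoder_pretrained is the standard transformers API for
# stitching a speech encoder to a text decoder):
from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-uncased"
)
print(model.config.encoder.model_type, "->", model.config.decoder.model_type)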
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } snake_case = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = EfficientNetConfig() SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"] SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"] SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"] SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"] SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"] SCREAMING_SNAKE_CASE : str = "huggingface/label-files" SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : str = 1000 SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : int = 
    EfficientNetImageProcessor(
        size={"height": size, "width": size} ,
        image_mean=[0.485, 0.456, 0.406] ,
        image_std=[0.47853944, 0.4732864, 0.47434163] ,
        do_center_crop=lowercase ,
    )
    return preprocessor


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) )
    SCREAMING_SNAKE_CASE : List[str] = len(lowercase )
    SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}
    SCREAMING_SNAKE_CASE : Dict = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
    for b in block_names:
        SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
        rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
        rename_keys.append(
            (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
        rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
        rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
        rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
        rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
        rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
        rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
        rename_keys.append(
            (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
        rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
        rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    SCREAMING_SNAKE_CASE : int = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1]
    SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight"
    SCREAMING_SNAKE_CASE : List[str] = "classifier.bias"
    return key_mapping


def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        SCREAMING_SNAKE_CASE : str = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) )
        else:
            SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(lowercase )


@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name](
        include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , )
    SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables
    SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables
    SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        SCREAMING_SNAKE_CASE : Tuple = param.numpy()
    SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() )

    # Load HuggingFace model
    SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase )
    SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval()
    SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase )
    replace_params(lowercase , lowercase , lowercase )

    # Initialize preprocessor and preprocess input image
    SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase )
    SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase )
    SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy()

    # Original model inference
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
    SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase )
    SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 )
    SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(lowercase ):
            os.mkdir(lowercase )
        # Save converted model and image processor
        hf_model.save_pretrained(lowercase )
        preprocessor.save_pretrained(lowercase )

    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(lowercase )
        hf_model.push_to_hub(lowercase )


if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""b0""",
        type=str,
        help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""hf_model""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")

    snake_case = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
319
1
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


snake_case = logging.get_logger(__name__)

snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}

snake_case = {
    """vocab_file""": {
        """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
        """allenai/longformer-large-4096""": (
            """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
        ),
        """allenai/longformer-large-4096-finetuned-triviaqa""": (
            """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
        ),
        """allenai/longformer-base-4096-extra.pos.embd.only""": (
            """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
        ),
        """allenai/longformer-large-4096-extra.pos.embd.only""": (
            """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
        ),
    },
    """merges_file""": {
        """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
        """allenai/longformer-large-4096""": (
            """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
        ),
        """allenai/longformer-large-4096-finetuned-triviaqa""": (
            """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
        ),
        """allenai/longformer-base-4096-extra.pos.embd.only""": (
            """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
        ),
        """allenai/longformer-large-4096-extra.pos.embd.only""": (
            """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
        ),
    },
}

snake_case = {
    """allenai/longformer-base-4096""": 4_096,
    """allenai/longformer-large-4096""": 4_096,
    """allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
    """allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
    """allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase__ ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[Any] = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    SCREAMING_SNAKE_CASE : List[str] = bs[:]
    SCREAMING_SNAKE_CASE : List[str] = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(lowercase )
            cs.append(2**8 + n )
            n += 1
    SCREAMING_SNAKE_CASE : int = [chr(lowercase ) for n in cs]
    return dict(zip(lowercase , lowercase ) )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[str] = set()
    SCREAMING_SNAKE_CASE : List[Any] = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        SCREAMING_SNAKE_CASE : Dict = char
    return pairs


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    UpperCamelCase_ : Any = VOCAB_FILES_NAMES
    UpperCamelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : List[str] = ['''input_ids''', '''attention_mask''']

    def __init__( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int="replace" , UpperCAmelCase_ : str="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : List[str]="<s>" , UpperCAmelCase_ : Tuple="<unk>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : Union[str, Any]="<mask>" , UpperCAmelCase_ : List[str]=False , **UpperCAmelCase_ : Any , ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else bos_token
        SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else eos_token
        SCREAMING_SNAKE_CASE : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else sep_token
        SCREAMING_SNAKE_CASE : List[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else cls_token
        SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
        SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token

        super().__init__(
            errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )

        with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
            SCREAMING_SNAKE_CASE : str = json.load(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : int = {v: k for k, v in self.encoder.items()}
        SCREAMING_SNAKE_CASE : int = errors  # how to handle errors in decoding
        SCREAMING_SNAKE_CASE : Any = bytes_to_unicode()
        SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
        with open(UpperCAmelCase_ , encoding="utf-8" ) as merges_handle:
            SCREAMING_SNAKE_CASE : Tuple = merges_handle.read().split("\n" )[1:-1]
        SCREAMING_SNAKE_CASE : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
        SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
        SCREAMING_SNAKE_CASE : Any = {}
        SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        SCREAMING_SNAKE_CASE : Any = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    def _A ( self : Optional[Any] ):
        return len(self.encoder )

    def _A ( self : Optional[int] ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] ):
        if token in self.cache:
            return self.cache[token]
        SCREAMING_SNAKE_CASE : Optional[Any] = tuple(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = get_pairs(UpperCAmelCase_ )

        if not pairs:
            return token

        while True:
            SCREAMING_SNAKE_CASE : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = bigram
            SCREAMING_SNAKE_CASE : int = []
            SCREAMING_SNAKE_CASE : List[Any] = 0
            while i < len(UpperCAmelCase_ ):
                try:
                    SCREAMING_SNAKE_CASE : Dict = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    SCREAMING_SNAKE_CASE : Optional[int] = j

                if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE : Optional[int] = new_word
            if len(UpperCAmelCase_ ) == 1:
                break
            else:
                SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : str = " ".join(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = word
        return word

    def _A ( self : Dict , UpperCAmelCase_ : Any ):
        SCREAMING_SNAKE_CASE : List[Any] = []
        for token in re.findall(self.pat , UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE : Optional[int] = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_ ).split(" " ) )
        return bpe_tokens

    def _A ( self : str , UpperCAmelCase_ : List[str] ):
        return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )

    def _A ( self : Optional[int] , UpperCAmelCase_ : List[str] ):
        return self.decoder.get(UpperCAmelCase_ )

    def _A ( self : Any , UpperCAmelCase_ : str ):
        SCREAMING_SNAKE_CASE : Tuple = "".join(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
        if not os.path.isdir(UpperCAmelCase_ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        SCREAMING_SNAKE_CASE : Dict = os.path.join(
            UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
            UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )

        with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + "\n" )

        SCREAMING_SNAKE_CASE : Any = 0
        with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    SCREAMING_SNAKE_CASE : Dict = token_index
                writer.write(" ".join(UpperCAmelCase_ ) + "\n" )
                index += 1

        return vocab_file, merge_file

    def _A ( self : Optional[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
        SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )

        if token_ids_a is None:
            return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
        return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]

    def _A ( self : Optional[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
        SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
        SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _A ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=False , **UpperCAmelCase_ : List[str] ):
        SCREAMING_SNAKE_CASE : Any = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_ ) > 0 and not text[0].isspace()):
            SCREAMING_SNAKE_CASE : int = " " + text
        return (text, kwargs)
319
def lowerCamelCase__ ( ):
    """simple docstring"""
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]


snake_case = generate_large_matrix()
snake_case = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid )
    assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = 0
    SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2
        SCREAMING_SNAKE_CASE : Optional[int] = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            SCREAMING_SNAKE_CASE : List[Any] = mid + 1
        else:
            SCREAMING_SNAKE_CASE : Dict = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(lowercase )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = 0
    SCREAMING_SNAKE_CASE : List[str] = len(grid[0] )

    for i in range(len(lowercase ) ):
        SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(lowercase ) * len(grid[0] )) - total


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    return len([number for row in grid for number in row if number < 0] )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = 0
    for row in grid:
        for i, number in enumerate(lowercase ):
            if number < 0:
                total += len(lowercase ) - i
                break
    return total


def lowerCamelCase__ ( ):
    """simple docstring"""
    from timeit import timeit

    print("Running benchmarks" )
    SCREAMING_SNAKE_CASE : List[str] = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
319
1
from collections import Counter
from timeit import timeit


def lowerCamelCase__ ( lowercase = "" , ):
    """simple docstring"""
    return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2


def lowerCamelCase__ ( lowercase = "" ):
    """simple docstring"""
    if len(lowercase ) == 0:
        return True
    SCREAMING_SNAKE_CASE : List[Any] = input_str.replace(" " , "" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    SCREAMING_SNAKE_CASE : dict[str, int] = {}

    for character in lower_case_input_str:
        SCREAMING_SNAKE_CASE : List[Any] = character_freq_dict.get(lowercase , 0 ) + 1
    SCREAMING_SNAKE_CASE : Union[str, Any] = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def lowerCamelCase__ ( lowercase = "" ):
    """simple docstring"""
    print("\nFor string = " , lowercase , ":" )
    print(
        "> can_string_be_rearranged_as_palindrome_counter()" ,
        "\tans =" ,
        can_string_be_rearranged_as_palindrome_counter(lowercase ) ,
        "\ttime =" ,
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" ,
            setup="import __main__ as z" ,
        ) ,
        "seconds" ,
    )
    print(
        "> can_string_be_rearranged_as_palindrome()" ,
        "\tans =" ,
        can_string_be_rearranged_as_palindrome(lowercase ) ,
        "\ttime =" ,
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)" ,
            setup="import __main__ as z" ,
        ) ,
        "seconds" ,
    )


if __name__ == "__main__":
    snake_case = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    snake_case = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
319
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


snake_case = ["""small""", """medium""", """large"""]

snake_case = """lm_head.decoder.weight"""
snake_case = """lm_head.weight"""


def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase )
    SCREAMING_SNAKE_CASE : Any = d.pop(lowercase )
    os.makedirs(lowercase , exist_ok=lowercase )
    torch.save(lowercase , os.path.join(lowercase , lowercase ) )


if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    snake_case = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        snake_case = F"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
319
1
from copy import deepcopy


class SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    # Fenwick (binary indexed) tree: point updates and prefix-sum queries in O(log n).
    def __init__( self : Optional[Any] , UpperCAmelCase_ : list[int] | None = None , UpperCAmelCase_ : int | None = None ):
        if arr is None and size is not None:
            SCREAMING_SNAKE_CASE : Any = size
            SCREAMING_SNAKE_CASE : List[str] = [0] * size
        elif arr is not None:
            self.init(UpperCAmelCase_ )
        else:
            raise ValueError("Either arr or size must be specified" )

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : list[int] ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = len(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = deepcopy(UpperCAmelCase_ )
        for i in range(1 , self.size ):
            SCREAMING_SNAKE_CASE : List[Any] = self.next_(UpperCAmelCase_ )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def _A ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            SCREAMING_SNAKE_CASE : Optional[int] = self.next_(UpperCAmelCase_ )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def _A ( UpperCAmelCase_ : int ):
        # Step to the next node in update order by adding the lowest set bit.
        return index + (index & (-index))

    @staticmethod
    def _A ( UpperCAmelCase_ : int ):
        # Step to the previous node in query order by clearing the lowest set bit.
        return index - (index & (-index))

    def _A ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            SCREAMING_SNAKE_CASE : Any = self.next_(UpperCAmelCase_ )

    def _A ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        self.add(UpperCAmelCase_ , value - self.get(UpperCAmelCase_ ) )

    def _A ( self : Dict , UpperCAmelCase_ : int ):
        if right == 0:
            return 0
        SCREAMING_SNAKE_CASE : List[str] = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            SCREAMING_SNAKE_CASE : List[str] = self.prev(UpperCAmelCase_ )
        return result

    def _A ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        return self.prefix(UpperCAmelCase_ ) - self.prefix(UpperCAmelCase_ )

    def _A ( self : Optional[int] , UpperCAmelCase_ : int ):
        return self.query(UpperCAmelCase_ , index + 1 )

    def _A ( self : List[Any] , UpperCAmelCase_ : int ):
        # Binary-lifting search for the largest index whose prefix sum does not exceed value.
        value -= self.tree[0]
        if value < 0:
            return -1

        SCREAMING_SNAKE_CASE : int = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        SCREAMING_SNAKE_CASE : List[str] = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


snake_case = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case = ["""MLukeTokenizer"""]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def _A ( UpperCAmelCase_ : ArgumentParser ):
        raise NotImplementedError()

    @abstractmethod
    def _A ( self : List[str] ):
        raise NotImplementedError()
319
def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    return int((input_a, input_a).count(1 ) != 0 )


def lowerCamelCase__ ( ):
    """simple docstring"""
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
319
1
import re


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    if len(re.findall("[ATCG]" , lowercase ) ) != len(lowercase ):
        raise ValueError("Invalid Strand" )

    return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
class SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    # Disjoint-set (union-find) over weighted sets, merging by rank and tracking the largest set.
    def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts
        SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets
        SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) )

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        # Merge the two sets; the lower-rank root is attached under the higher-rank one
        # and its element count is pooled into the surviving root.
        SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ )

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            SCREAMING_SNAKE_CASE : Dict = 0
            SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            SCREAMING_SNAKE_CASE : Optional[int] = 0
            SCREAMING_SNAKE_CASE : Tuple = src_parent
            SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent]

        SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ )
        return True

    def _A ( self : Tuple , UpperCAmelCase_ : int ):
        # Recursively resolve the representative (root) of the given set.
        if self.parents[disj_set] == disj_set:
            return disj_set
        SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
319
1