from ...configuration_utils import PretrainedConfig

TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            # JSON serializes dict keys as strings, so coerce them back to int
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
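A brief usage sketch for the config above (not part of the original file; it assumes an installed `transformers` package, where this class is exposed as `transformers.TapasConfig`):

from transformers import TapasConfig

# String keys (e.g. loaded from a JSON config) are coerced back to int keys in __init__
config = TapasConfig(num_aggregation_labels=2, aggregation_labels={"0": "NONE", "1": "SUM"})
assert config.aggregation_labels == {0: "NONE", 1: "SUM"}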
"""simple docstring""" # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''') class lowercase: '''simple docstring''' def __init__( self: str, a_: Dict, a_: List[str], a_: bool = True, a_: bool = False ): '''simple docstring''' _snake_case : Tuple = scheduler _snake_case : Optional[Any] = optimizers if isinstance(a_, (list, tuple) ) else [optimizers] _snake_case : str = split_batches _snake_case : List[str] = step_with_optimizer _snake_case : Tuple = GradientState() def UpperCamelCase_ ( self: Optional[int], *a_: Optional[Any], **a_: Optional[int] ): '''simple docstring''' if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*a_, **a_ ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*a_, **a_ ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step _snake_case : Tuple = AcceleratorState().num_processes for _ in range(a_ ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler, """total_steps""" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*a_, **a_ ) else: self.scheduler.step(*a_, **a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return self.scheduler.get_last_lr() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' return self.scheduler.state_dict() def UpperCamelCase_ ( self: List[Any], a_: Union[str, Any] ): '''simple docstring''' self.scheduler.load_state_dict(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' return self.scheduler.get_lr() def UpperCamelCase_ ( self: Any, *a_: Optional[Any], **a_: Dict ): '''simple docstring''' return self.scheduler.print_lr(*a_, **a_ )
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging

logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
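A short usage sketch for the activation parsing above (not part of the original file; assumes an installed `transformers`):

from transformers import T5Config

config = T5Config(feed_forward_proj="gated-gelu")
assert config.is_gated_act
assert config.dense_act_fn == "gelu_new"  # remapped for backwards compatibility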
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_json_file = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_json_file, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if number and number + 2 form a twin prime pair, otherwise -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
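A quick usage sketch (not part of the original file; assumes the surrounding repository layout that provides `maths.prime_check`):

print(twin_prime(3))  # 5, since 3 and 5 are both prime
print(twin_prime(4))  # -1, since 4 is not prime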
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
    CenterCrop,
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    Resize,
    ToTensor,
)

import transformers
from transformers import (
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForImageClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
"""simple docstring""" import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _lowerCamelCase ( __a ): # picklable for multiprocessing return x.sum() def _lowerCamelCase ( __a ): # picklable for multiprocessing return i + 1 @dataclass class snake_case : UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 class snake_case ( __lowercase ): def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = [1, 2] SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2} SCREAMING_SNAKE_CASE_ = {'''a''': [1, 2], '''b''': [3, 4]} SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 1}, '''b''': 2} SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = [2, 3] SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3} SCREAMING_SNAKE_CASE_ = {'''a''': [2, 3], '''b''': [4, 5]} SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 2}, '''b''': 3} SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = 2 self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )} SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 0, '''c''': 2} SCREAMING_SNAKE_CASE_ = { '''a''': np.eye(2 
).astype(SCREAMING_SNAKE_CASE_ ), '''b''': np.zeros(3 ).astype(SCREAMING_SNAKE_CASE_ ), '''c''': np.ones(2 ).astype(SCREAMING_SNAKE_CASE_ ), } self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): # can't pickle a local lambda map_nested(lambda SCREAMING_SNAKE_CASE_ : x + 1 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2} SCREAMING_SNAKE_CASE_ = {'''a''': 3, '''b''': 4} SCREAMING_SNAKE_CASE_ = {'''a''': 5, '''b''': 6} SCREAMING_SNAKE_CASE_ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ ) def _lowercase (self ): """simple docstring""" class snake_case : UpperCAmelCase__ = '''bar''' SCREAMING_SNAKE_CASE_ = Foo() self.assertEqual(foo.my_attr , '''bar''' ) with temporary_assignment(SCREAMING_SNAKE_CASE_ , '''my_attr''' , '''BAR''' ): self.assertEqual(foo.my_attr , '''BAR''' ) self.assertEqual(foo.my_attr , '''bar''' ) @pytest.mark.parametrize( '''iterable_length, num_proc, expected_num_proc''', [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def _lowerCamelCase ( __a, __a, __a ): with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch( '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool: SCREAMING_SNAKE_CASE_ = {F'{i}': i for i in range(__a )} SCREAMING_SNAKE_CASE_ = map_nested(lambda __a : x + 10, __a, num_proc=__a, parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class snake_case ( __lowercase ): @require_tf def _lowercase (self ): """simple docstring""" import tensorflow as tf from tensorflow.keras import layers SCREAMING_SNAKE_CASE_ = layers.Dense(2 ) def gen_random_output(): SCREAMING_SNAKE_CASE_ = tf.random.uniform((1, 3) ) return model(SCREAMING_SNAKE_CASE_ ).numpy() with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ = gen_random_output() with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ = gen_random_output() SCREAMING_SNAKE_CASE_ = gen_random_output() np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def _lowercase (self ): """simple docstring""" import torch def gen_random_output(): SCREAMING_SNAKE_CASE_ = torch.nn.Linear(3 , 2 ) SCREAMING_SNAKE_CASE_ = torch.rand(1 , 3 
) return model(SCREAMING_SNAKE_CASE_ ).detach().numpy() with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ = gen_random_output() with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ = gen_random_output() SCREAMING_SNAKE_CASE_ = gen_random_output() np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def _lowercase (self ): """simple docstring""" def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): SCREAMING_SNAKE_CASE_ = gen_random_output() with temp_seed(42 ): SCREAMING_SNAKE_CASE_ = gen_random_output() SCREAMING_SNAKE_CASE_ = gen_random_output() np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('''input_data''', [{}] ) def _lowerCamelCase ( __a ): SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).data assert output_data == input_data @pytest.mark.parametrize( '''data, expected_output''', [ ({}, []), ([], []), ('''foo''', ['''foo''']), (['''foo''', '''bar'''], ['''foo''', '''bar''']), ([['''foo''', '''bar''']], ['''foo''', '''bar''']), ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']), ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']), ({'''a''': 1, '''b''': 2}, [1, 2]), ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]), ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]), ], ) def _lowerCamelCase ( __a, __a ): SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).flatten() assert output == expected_output def _lowerCamelCase ( ): SCREAMING_SNAKE_CASE_ = A(x=1, y='''foobar''' ) SCREAMING_SNAKE_CASE_ = {'''x''': 1, '''y''': '''foobar'''} assert asdict(__a ) == expected_output SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]} SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]} assert asdict(__a ) == expected_output with pytest.raises(__a ): asdict([1, A(x=10, y='''foo''' )] ) def _lowerCamelCase ( __a ): return text.split() def _lowerCamelCase ( __a ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _lowerCamelCase ( ): with Pool(2 ) as pool: SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(__a ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(__a ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: SCREAMING_SNAKE_CASE_ = [] for yield_time, content in iflatmap_unordered( __a, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ): assert yield_time < 
time.time() + 0.1, "we should each item directly after it was yielded" out.append(__a ) assert out.count('''a''' ) == 2 assert out.count('''b''' ) == 2 assert len(__a ) == 4
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: the claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the Banker's algorithm, executing processes in a safe order if one exists."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's data tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print("Current Usage by Active Processes: " + " ".join(str(x) for x in self.__claim_vector))
        print("Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
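A minimal driver sketch using the module's test tables (not part of the original file; `describe=True` is simply a truthy keyword that triggers the pretty-printer inside `main`):

BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)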
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is kept in the `asdict` output even at its default so it survives JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
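A usage sketch for the template's column remapping (not part of the original file; the import path assumes the `datasets` task API, e.g. `from datasets.tasks import QuestionAnsweringExtractive`):

template = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
print(template.column_mapping)  # {'q': 'question', 'ctx': 'context', 'ans': 'answers'}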
def sylvester(number: int) -> int:
    """
    Calculate the nth number in Sylvester's sequence.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: print a topological ordering of the graph, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """Serialize prints from multiple processes with a file lock so they don't interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
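This diagnostic expects one process per GPU, launched via a distributed launcher that sets LOCAL_RANK; a typical invocation (the script filename here is illustrative) is:

torchrun --nproc_per_node=2 torch-distributed-gpu-test.py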
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Optional[int] = '''speech_to_text''' __lowercase : List[str] = ['''past_key_values'''] __lowercase : str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCAmelCase__=1_0_0_0_0 , lowerCAmelCase__=1_2 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=4 , lowerCAmelCase__=6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=4 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=2_5_6 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=6_0_0_0 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=2 , lowerCAmelCase__=(5, 5) , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=8_0 , lowerCAmelCase__=1 , **lowerCAmelCase__ , ): __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = decoder_layerdrop __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True __SCREAMING_SNAKE_CASE = max_source_positions __SCREAMING_SNAKE_CASE = max_target_positions __SCREAMING_SNAKE_CASE = num_conv_layers __SCREAMING_SNAKE_CASE = list(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = conv_channels __SCREAMING_SNAKE_CASE = input_feat_per_channel __SCREAMING_SNAKE_CASE = input_channels if len(self.conv_kernel_sizes) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, " f"`config.num_conv_layers = {self.num_conv_layers}`.") super().__init__( pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
155
0
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True if 'number' is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the reduced sum x/x_den + y/y_den + z/z_den as (numerator, denominator)."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """
    For each n in (1, 2, -1, -2), collect the unique reduced fractions z that
    the blocks below derive from proper fractions x and y bounded by 'order',
    sum x + y + z exactly, and return numerator + denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
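# Worked check of add_three() above, summing 1/2 + 1/3 + 1/6:
# top = 1*3*6 + 1*2*6 + 1*2*3 = 36 and bottom = 2*3*6 = 36, so dividing both
# by gcd(36, 36) = 36 reduces the sum to the tuple (1, 1), i.e. exactly 1.
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)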
408
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
408
1
"""simple docstring""" from math import sqrt def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' must been an int and positive" UpperCamelCase : Dict = True # 0 and 1 are none primes. if number <= 1: UpperCamelCase : Optional[int] = False for divisor in range(2 , int(round(sqrt(SCREAMING_SNAKE_CASE ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: UpperCamelCase : Any = False break # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'status' must been from type bool" return status def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N UpperCamelCase : str = list(range(2 , n + 1 ) ) UpperCamelCase : Optional[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(SCREAMING_SNAKE_CASE ) ): for j in range(i + 1 , len(SCREAMING_SNAKE_CASE ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): UpperCamelCase : Union[str, Any] = 0 # filters actual prime numbers. UpperCamelCase : str = [x for x in begin_list if x != 0] # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'ans' must been from type list" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2" UpperCamelCase : Dict = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(SCREAMING_SNAKE_CASE ): ans.append(SCREAMING_SNAKE_CASE ) # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'ans' must been from type list" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must been an int and >= 0" UpperCamelCase : Tuple = [] # this list will be returns of the function. # potential prime number factors. 
UpperCamelCase : Union[str, Any] = 2 UpperCamelCase : Tuple = number if number == 0 or number == 1: ans.append(SCREAMING_SNAKE_CASE ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(SCREAMING_SNAKE_CASE ): while quotient != 1: if is_prime(SCREAMING_SNAKE_CASE ) and (quotient % factor == 0): ans.append(SCREAMING_SNAKE_CASE ) quotient /= factor else: factor += 1 else: ans.append(SCREAMING_SNAKE_CASE ) # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'ans' must been from type list" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' bust been an int and >= 0" UpperCamelCase : List[Any] = 0 # prime factorization of 'number' UpperCamelCase : int = prime_factorization(SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = max(SCREAMING_SNAKE_CASE ) # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'ans' must been from type int" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' bust been an int and >= 0" UpperCamelCase : List[Any] = 0 # prime factorization of 'number' UpperCamelCase : Tuple = prime_factorization(SCREAMING_SNAKE_CASE ) UpperCamelCase : str = min(SCREAMING_SNAKE_CASE ) # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'ans' must been from type int" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'number' must been an int" assert isinstance(number % 2 == 0 , SCREAMING_SNAKE_CASE ), "compare bust been from type bool" return number % 2 == 0 def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'number' must been an int" assert isinstance(number % 2 != 0 , SCREAMING_SNAKE_CASE ), "compare bust been from type bool" return number % 2 != 0 def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(SCREAMING_SNAKE_CASE ) ), "'number' must been an int, even and > 2" UpperCamelCase : Tuple = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' UpperCamelCase : Dict = get_prime_numbers(SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = len(SCREAMING_SNAKE_CASE ) # run variable for while-loops. UpperCamelCase : Optional[int] = 0 UpperCamelCase : Union[str, Any] = None # exit variable. for break up the loops UpperCamelCase : str = True while i < len_pn and loop: UpperCamelCase : Tuple = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: UpperCamelCase : Any = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (len(SCREAMING_SNAKE_CASE ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
UpperCamelCase : Optional[int] = 0 while numbera != 0: UpperCamelCase : Union[str, Any] = numbera % numbera UpperCamelCase : Any = numbera UpperCamelCase : Optional[Any] = rest # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." UpperCamelCase : List[Any] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' UpperCamelCase : Dict = prime_factorization(SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = prime_factorization(SCREAMING_SNAKE_CASE ) elif numbera == 1 or numbera == 1: UpperCamelCase : Optional[Any] = [] UpperCamelCase : List[str] = [] UpperCamelCase : str = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = 0 UpperCamelCase : int = 0 UpperCamelCase : int = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: UpperCamelCase : Optional[Any] = prime_fac_a.count(SCREAMING_SNAKE_CASE ) UpperCamelCase : str = prime_fac_a.count(SCREAMING_SNAKE_CASE ) for _ in range(max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ): ans *= n else: UpperCamelCase : Tuple = prime_fac_a.count(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ): ans *= n done.append(SCREAMING_SNAKE_CASE ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: UpperCamelCase : Optional[Any] = prime_fac_a.count(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ): ans *= n done.append(SCREAMING_SNAKE_CASE ) # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must been a positive int" UpperCamelCase : int = 0 UpperCamelCase : Optional[Any] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(SCREAMING_SNAKE_CASE ): ans += 1 # precondition assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and is_prime( SCREAMING_SNAKE_CASE ), "'ans' must been a prime number and from type int" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): assert ( is_prime(SCREAMING_SNAKE_CASE ) and is_prime(SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" UpperCamelCase : int = p_number_a + 1 # jump to the next number UpperCamelCase : Optional[Any] = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(SCREAMING_SNAKE_CASE ): number += 1 while number < p_number_a: ans.append(SCREAMING_SNAKE_CASE ) number += 1 # fetch the next prime number. 
while not is_prime(SCREAMING_SNAKE_CASE ): number += 1 # precondition assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and ans[0] != p_number_a and ans[len(SCREAMING_SNAKE_CASE ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must been int and >= 1" UpperCamelCase : Tuple = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(SCREAMING_SNAKE_CASE ) # precondition assert ans[0] == 1 and ans[len(SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function getDivisiors(...)" return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and ( number > 1 ), "'number' must been an int and >= 1" UpperCamelCase : Any = get_divisors(SCREAMING_SNAKE_CASE ) # precondition assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (divisors[0] == 1) and (divisors[len(SCREAMING_SNAKE_CASE ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. UpperCamelCase : Dict = gcd(abs(SCREAMING_SNAKE_CASE ) , abs(SCREAMING_SNAKE_CASE ) ) # precondition assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been a int and >= 0" UpperCamelCase : Optional[int] = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def UpperCamelCase (SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been an int and >= 0" UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = 1 UpperCamelCase : Dict = 1 # this will be return for _ in range(n - 1 ): UpperCamelCase : int = ans ans += fiba UpperCamelCase : Dict = tmp return ans
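# Hedged sanity checks for the number-theory helpers above. The function
# definitions in this dump carry mangled names, so is_prime and
# prime_factorization are recovered from their internal call sites, while
# "goldbach" (the even-number prime-pair helper) is an assumed name; the
# expected values below were computed by hand.
assert is_prime(97)
assert prime_factorization(60) == [2, 2, 3, 5]
assert goldbach(28) == [5, 23]  # the first prime pair the search finds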
102
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __magic_name__ : int = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : List[Any] = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __magic_name__ : Optional[int] = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __magic_name__ : str = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __magic_name__ : Optional[Any] = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __magic_name__ : Optional[Any] = _LazyModule(__name__, 
globals()["""__file__"""], _import_structure, module_spec=__spec__)
102
1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys

fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
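# A small self-contained illustration of the path filter used above; the
# directory list and file paths here are made up for the example.
import re

joined = "|".join(["utils", "src"])
matcher = re.compile(rf"^({joined}).*?\.py$")

paths = ["src/transformers/utils.py", "docs/index.md", "tests/test_x.py"]
print([p for p in paths if matcher.match(p)])  # ['src/transformers/utils.py']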
336
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowerCamelCase__ : Any =LDMTextToImagePipeline lowerCamelCase__ : Tuple =TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } lowerCamelCase__ : Union[str, Any] =PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } lowerCamelCase__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase__ : str =False def lowercase ( self ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) __magic_name__ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __magic_name__ : Tuple = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , ) torch.manual_seed(0 ) __magic_name__ : Optional[Any] = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , ) torch.manual_seed(0 ) __magic_name__ : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __magic_name__ : Tuple = CLIPTextModel(lowerCamelCase ) __magic_name__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __magic_name__ : Any = { '''unet''': unet, '''scheduler''': scheduler, '''vqvae''': vae, '''bert''': text_encoder, '''tokenizer''': tokenizer, } return components def lowercase ( self , lowerCamelCase , lowerCamelCase=0 ) -> Any: """simple docstring""" if str(lowerCamelCase ).startswith('''mps''' ): __magic_name__ : int = torch.manual_seed(lowerCamelCase ) else: __magic_name__ : Optional[int] = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __magic_name__ : Dict = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowercase ( self ) -> Tuple: """simple docstring""" __magic_name__ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __magic_name__ : List[str] = self.get_dummy_components() __magic_name__ : List[str] = LDMTextToImagePipeline(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __magic_name__ : List[Any] = self.get_dummy_inputs(lowerCamelCase ) __magic_name__ : Tuple = pipe(**lowerCamelCase ).images __magic_name__ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) __magic_name__ : Optional[int] = 
np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class A__ ( unittest.TestCase ): def lowercase ( self ) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase ( self , lowerCamelCase , lowerCamelCase=torch.floataa , lowerCamelCase=0 ) -> int: """simple docstring""" __magic_name__ : str = torch.manual_seed(lowerCamelCase ) __magic_name__ : str = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 32, 32) ) __magic_name__ : str = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase ) __magic_name__ : Optional[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowercase ( self ) -> Tuple: """simple docstring""" __magic_name__ : List[str] = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __magic_name__ : Optional[int] = self.get_inputs(lowerCamelCase ) __magic_name__ : Any = pipe(**lowerCamelCase ).images __magic_name__ : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) __magic_name__ : List[str] = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] ) __magic_name__ : List[str] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class A__ ( unittest.TestCase ): def lowercase ( self ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase ( self , lowerCamelCase , lowerCamelCase=torch.floataa , lowerCamelCase=0 ) -> int: """simple docstring""" __magic_name__ : Optional[Any] = torch.manual_seed(lowerCamelCase ) __magic_name__ : Optional[Any] = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 32, 32) ) __magic_name__ : Dict = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase ) __magic_name__ : Dict = { '''prompt''': '''A painting of a squirrel eating a burger''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 50, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowercase ( self ) -> Dict: """simple docstring""" __magic_name__ : Optional[int] = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __magic_name__ : Optional[Any] = self.get_inputs(lowerCamelCase ) __magic_name__ : Optional[Any] = pipe(**lowerCamelCase ).images[0] __magic_name__ : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' ) __magic_name__ : Optional[int] = np.abs(expected_image - image ).max() assert max_diff < 1e-3
336
1
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
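# Quick verification of the Graph class above on its own sample edges: the
# shortest 1 -> 4 path is 1 -> 3 -> 4 (5 + 6 = 11) and the shortest 0 -> 3
# path is 0 -> 2 -> 3 (9 + 7 = 16); both values were checked by hand.
g = Graph(5)
edges = [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10), (3, 1, 2),
         (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]
for u, v, w in edges:
    g.add_edge(u, v, w)
g.floyd_warshall()
assert g.show_min(1, 4) == 11
assert g.show_min(0, 3) == 16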
469
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
469
1
import argparse import os import re import packaging.version a_ = """examples/""" a_ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } a_ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } a_ = """README.md""" def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Tuple ,_UpperCamelCase : Any ): with open(_UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: __lowerCamelCase = f.read() __lowerCamelCase ,__lowerCamelCase = REPLACE_PATTERNS[pattern] __lowerCamelCase = replace.replace('''VERSION''' ,_UpperCamelCase ) __lowerCamelCase = re_pattern.sub(_UpperCamelCase ,_UpperCamelCase ) with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.write(_UpperCamelCase ) def a__ ( _UpperCamelCase : Union[str, Any] ): for folder, directories, fnames in os.walk(_UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(_UpperCamelCase ,_UpperCamelCase ) ,_UpperCamelCase ,pattern='''examples''' ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : List[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) if not patch: update_version_in_examples(_UpperCamelCase ) def a__ ( ): __lowerCamelCase = '''🤗 Transformers currently provides the following architectures''' __lowerCamelCase = '''1. Want to contribute a new model?''' with open(_UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: __lowerCamelCase = f.readlines() # Find the start of the list. __lowerCamelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowerCamelCase = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): __lowerCamelCase = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,) index += 1 with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.writelines(_UpperCamelCase ) def a__ ( ): with open(REPLACE_FILES['''init'''] ,'''r''' ) as f: __lowerCamelCase = f.read() __lowerCamelCase = REPLACE_PATTERNS['''init'''][0].search(_UpperCamelCase ).groups()[0] return packaging.version.parse(_UpperCamelCase ) def a__ ( _UpperCamelCase : Optional[Any]=False ): __lowerCamelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: __lowerCamelCase = default_version.base_version elif patch: __lowerCamelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __lowerCamelCase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __lowerCamelCase = input(F"""Which version are you releasing? [{default_version}]""" ) if len(_UpperCamelCase ) == 0: __lowerCamelCase = default_version print(F"""Updating version to {version}.""" ) global_version_update(_UpperCamelCase ,patch=_UpperCamelCase ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def a__ ( ): __lowerCamelCase = get_version() __lowerCamelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __lowerCamelCase = current_version.base_version # Check with the user we got that right. __lowerCamelCase = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(_UpperCamelCase ) == 0: __lowerCamelCase = dev_version print(F"""Updating version to {version}.""" ) global_version_update(_UpperCamelCase ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") a_ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
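# A quick standalone illustration of the "init" pattern the release script
# above uses to read the current version out of src/transformers/__init__.py;
# the version string below is made up for the example.
import re

init_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
print(init_pattern.search('__version__ = "4.2.0.dev0"\n').groups()[0])  # 4.2.0.dev0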
622
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Any ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ): if isinstance(_UpperCamelCase ,_UpperCamelCase ): __lowerCamelCase = np.full((len(_UpperCamelCase ), sequence_length, 2) ,_UpperCamelCase ) else: __lowerCamelCase = np.full((len(_UpperCamelCase ), sequence_length) ,_UpperCamelCase ) for i, tensor in enumerate(_UpperCamelCase ): if padding_side == "right": if isinstance(_UpperCamelCase ,_UpperCamelCase ): __lowerCamelCase = tensor[:sequence_length] else: __lowerCamelCase = tensor[:sequence_length] else: if isinstance(_UpperCamelCase ,_UpperCamelCase ): __lowerCamelCase = tensor[:sequence_length] else: __lowerCamelCase = tensor[:sequence_length] return out_tensor.tolist() def a__ ( _UpperCamelCase : Dict ): __lowerCamelCase = ord(_UpperCamelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True __lowerCamelCase = unicodedata.category(_UpperCamelCase ) if cat.startswith('''P''' ): return True return False @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 lowerCAmelCase__ = True lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = -1_0_0 lowerCAmelCase__ = "pt" def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' import torch __lowerCamelCase = '''label''' if '''label''' in features[0].keys() else '''labels''' __lowerCamelCase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __lowerCamelCase = self.tokenizer.pad( __UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __lowerCamelCase = torch.tensor(batch['''entity_ids'''] ).shape[1] __lowerCamelCase = self.tokenizer.padding_side if padding_side == "right": __lowerCamelCase = [ list(__UpperCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCAmelCase )) for label in labels ] else: __lowerCamelCase = [ [self.label_pad_token_id] * (sequence_length - len(__UpperCAmelCase )) + list(__UpperCAmelCase ) for label in labels ] __lowerCamelCase = [feature['''ner_tags'''] for feature in features] __lowerCamelCase = padding_tensor(__UpperCAmelCase , -1 , __UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = [feature['''original_entity_spans'''] for feature in features] __lowerCamelCase = padding_tensor(__UpperCAmelCase , (-1, -1) , __UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = {k: torch.tensor(__UpperCAmelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
622
1
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of vertices reachable from 'start'."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
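# Quick check of depth_first_search above: starting from "A" it should visit
# every node of the sample graph exactly once (the result is a set of nodes).
assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}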
86
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
165
0
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE__ ( __a ): '''simple docstring''' _lowerCamelCase = (IPNDMScheduler,) _lowerCamelCase = (('''num_inference_steps''', 50),) def lowerCamelCase ( self : Union[str, Any] , **lowerCamelCase : Union[str, Any] ) -> Any: """simple docstring""" _UpperCAmelCase = {"""num_train_timesteps""": 1000} config.update(**a_ ) return config def lowerCamelCase ( self : Tuple , lowerCamelCase : Optional[int]=0 , **lowerCamelCase : int ) -> Dict: """simple docstring""" _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("""num_inference_steps""" , a_ ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**a_ ) _UpperCAmelCase = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[:] if time_step is None: _UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _UpperCAmelCase = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[:] _UpperCAmelCase = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample _UpperCAmelCase = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _UpperCAmelCase = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample _UpperCAmelCase = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : List[str] ) -> Dict: """simple docstring""" pass def lowerCamelCase ( self : str , lowerCamelCase : Any=0 , **lowerCamelCase : Tuple ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("""num_inference_steps""" , a_ ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[:] if time_step is None: _UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _UpperCAmelCase = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[:] _UpperCAmelCase = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample _UpperCAmelCase = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _UpperCAmelCase = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample _UpperCAmelCase = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : List[Any] 
, **lowerCamelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**a_ ) _UpperCAmelCase = scheduler_class(**a_ ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(a_ , a_ ) _UpperCAmelCase = scheduler.step(a_ , a_ , a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(a_ , a_ ) _UpperCAmelCase = scheduler.step(a_ , a_ , a_ ).prev_sample return sample def lowerCamelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("""num_inference_steps""" , a_ ) for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**a_ ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(a_ , """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_ , """set_timesteps""" ): _UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _UpperCAmelCase = dummy_past_residuals[:] _UpperCAmelCase = scheduler.timesteps[5] _UpperCAmelCase = scheduler.timesteps[6] _UpperCAmelCase = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample _UpperCAmelCase = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) _UpperCAmelCase = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample _UpperCAmelCase = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=a_ , time_step=a_ ) def lowerCamelCase ( self : Any ) -> List[Any]: """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_ , time_step=a_ ) def lowerCamelCase ( self : Optional[Any] ) -> Any: """simple docstring""" _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 254_0529 ) < 10
706
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
402
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=False ) -> str: """simple docstring""" a = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''deit.embeddings.cls_token'''), ('''dist_token''', '''deit.embeddings.distillation_token'''), ('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''deit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" a = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('''norm.weight''', '''deit.layernorm.weight'''), ('''norm.bias''', '''deit.layernorm.bias'''), ('''head.weight''', '''cls_classifier.weight'''), ('''head.bias''', '''cls_classifier.bias'''), ('''head_dist.weight''', '''distillation_classifier.weight'''), ('''head_dist.bias''', '''distillation_classifier.bias'''), ] ) return rename_keys def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=False ) -> Optional[int]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: a = '''''' else: a = '''deit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) a = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict a = in_proj_weight[ : config.hidden_size, : ] a = in_proj_bias[: config.hidden_size] a = in_proj_weight[ 
config.hidden_size : config.hidden_size * 2, : ] a = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a = in_proj_weight[ -config.hidden_size :, : ] a = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Dict: """simple docstring""" a = dct.pop(snake_case_ ) a = val def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: """simple docstring""" a = '''http://images.cocodataset.org/val2017/000000039769.jpg''' a = Image.open(requests.get(snake_case_, stream=snake_case_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Dict: """simple docstring""" a = DeiTConfig() # all deit models have fine-tuned heads a = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size a = 1_0_0_0 a = '''huggingface/label-files''' a = '''imagenet-1k-id2label.json''' a = json.load(open(hf_hub_download(snake_case_, snake_case_, repo_type='''dataset''' ), '''r''' ) ) a = {int(snake_case_ ): v for k, v in idalabel.items()} a = idalabel a = {v: k for k, v in idalabel.items()} a = int(deit_name[-6:-4] ) a = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith('''tiny''' ): a = 1_9_2 a = 7_6_8 a = 1_2 a = 3 elif deit_name[9:].startswith('''small''' ): a = 3_8_4 a = 1_5_3_6 a = 1_2 a = 6 if deit_name[9:].startswith('''base''' ): pass elif deit_name[4:].startswith('''large''' ): a = 1_0_2_4 a = 4_0_9_6 a = 2_4 a = 1_6 # load original model from timm a = timm.create_model(snake_case_, pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model, remove and rename some keys a = timm_model.state_dict() a = create_rename_keys(snake_case_, snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_, snake_case_, snake_case_ ) read_in_q_k_v(snake_case_, snake_case_, snake_case_ ) # load HuggingFace model a = DeiTForImageClassificationWithTeacher(snake_case_ ).eval() model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by DeiTImageProcessor a = int( (2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 a = DeiTImageProcessor(size=snake_case_, crop_size=config.image_size ) a = image_processor(images=prepare_img(), return_tensors='''pt''' ) a = encoding['''pixel_values'''] a = model(snake_case_ ) a = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_, outputs.logits, atol=1e-3 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) UpperCamelCase__ : Optional[Any] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
387
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
387
1
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extracts some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer-Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # The target keys below follow DistilBERT's parameter naming; the original
    # left-hand-side keys were lost in this copy and are restored from the
    # upstream extraction script.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
706
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you may try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - launch only 1 task per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """print messages under an exclusive file lock so output from multiple processes does not interleave"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
476
0
"""simple docstring""" SCREAMING_SNAKE_CASE__ : dict[str, float] ={ "km/h": 1.0, "m/s": 3.6, "mph": 1.60_9344, "knot": 1.852, } SCREAMING_SNAKE_CASE__ : dict[str, float] ={ "km/h": 1.0, "m/s": 0.2_7777_7778, "mph": 0.6_2137_1192, "knot": 0.5_3995_6803, } def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float: if unit_to not in speed_chart or unit_from not in speed_chart_inverse: _lowerCamelCase : Any = ( F'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n''' F'''Valid values are: {", ".join(SCREAMING_SNAKE_CASE_ )}''' ) raise ValueError(SCREAMING_SNAKE_CASE_ ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
434
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE__ : Dict ={ 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple =['PerceiverFeatureExtractor'] SCREAMING_SNAKE_CASE__ : int =['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =[ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : List[str] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
434
1
"""simple docstring""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] ) -> Tuple: # Initialise PyTorch model lowerCamelCase_ : List[Any] =RemBertConfig.from_json_file(UpperCAmelCase__ ) print("Building PyTorch model from configuration: {}".format(str(UpperCAmelCase__ ) ) ) lowerCamelCase_ : Any =RemBertModel(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_rembert(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model print("Save PyTorch model to {}".format(UpperCAmelCase__ ) ) torch.save(model.state_dict() , UpperCAmelCase__ ) if __name__ == "__main__": A__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--rembert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained RemBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) A__ : str = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
708
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer A__ : List[str] = logging.get_logger(__name__) A__ : Tuple = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} A__ : List[str] = { 'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'}, 'tokenizer_file': { 'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json' }, } A__ : List[Any] = {'mobilebert-uncased': 512} A__ : List[Any] = {} class lowercase__ ( snake_case__ ): _UpperCAmelCase :List[Any] = VOCAB_FILES_NAMES _UpperCAmelCase :str = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Dict = PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :int = MobileBertTokenizer def __init__( self : Tuple , snake_case__ : Any=None , snake_case__ : Any=None , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]="[UNK]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : Any="[PAD]" , snake_case__ : int="[CLS]" , snake_case__ : int="[MASK]" , snake_case__ : Optional[Any]=True , snake_case__ : int=None , **snake_case__ : List[Any] , ): super().__init__( snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , ) lowerCamelCase_ : Optional[int] =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars ): lowerCamelCase_ : str =getattr(snake_case__ , normalizer_state.pop("type" ) ) lowerCamelCase_ : Union[str, Any] =do_lower_case lowerCamelCase_ : List[Any] =strip_accents lowerCamelCase_ : List[Any] =tokenize_chinese_chars lowerCamelCase_ : Optional[Any] =normalizer_class(**snake_case__ ) lowerCamelCase_ : int =do_lower_case def UpperCAmelCase__ ( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict=None ): lowerCamelCase_ : Optional[int] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase__ ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): lowerCamelCase_ : Optional[Any] =[self.sep_token_id] lowerCamelCase_ : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ): lowerCamelCase_ : Optional[Any] =self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ )
244
0
def factorial(num: int) -> int:
    '''simple docstring'''
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    '''simple docstring'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    '''simple docstring'''
    result = split_and_add(factorial(num))
    return result


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
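# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) should return 27.
assert solution(10) == 27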
647
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ) -> Any: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = scope _UpperCamelCase = projection_dim def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) _UpperCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict()) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = TFDPRContextEncoder(config=__a) _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a) _UpperCamelCase = model(__a , token_type_ids=__a) _UpperCamelCase = model(__a) 
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = TFDPRQuestionEncoder(config=__a) _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a) _UpperCamelCase = model(__a , token_type_ids=__a) _UpperCamelCase = model(__a) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict: '''simple docstring''' _UpperCamelCase = TFDPRReader(config=__a) _UpperCamelCase = model(__a , attention_mask=__a) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,)) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowercase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = TFDPRModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__a) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__a) @slow def UpperCAmelCase ( self) -> str: '''simple docstring''' for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a) self.assertIsNotNone(__a) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a) self.assertIsNotNone(__a) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = TFDPRQuestionEncoder.from_pretrained(__a) self.assertIsNotNone(__a) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = TFDPRReader.from_pretrained(__a) self.assertIsNotNone(__a) @require_tf class _UpperCAmelCase( unittest.TestCase ): @slow def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''') _UpperCamelCase = tf.constant( [[1_01, 75_92, 10_10, 20_03, 20_26, 
38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP] _UpperCamelCase = model(__a)[0] # embedding shape = (1, 768) # compare the actual values for a slice. _UpperCamelCase = tf.constant( [ [ 0.0323_6253, 0.1275_3335, 0.1681_8509, 0.0027_9786, 0.389_6933, 0.2426_4945, 0.217_8971, -0.0233_5227, -0.0848_1959, -0.1432_4117, ] ]) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
19
0
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowercase : @property def a ( self ): return self.get_dummy_input() @property def a ( self ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def a ( self , snake_case=True , snake_case=False , snake_case=False , snake_case=False , ): snake_case_ = 4 snake_case_ = 32 snake_case_ = (32, 32) snake_case_ = torch.manual_seed(0 ) snake_case_ = torch.device(A__ ) snake_case_ = (batch_size, num_channels) + sizes snake_case_ = randn_tensor(A__ , generator=A__ , device=A__ ) snake_case_ = {"""hidden_states""": hidden_states} if include_temb: snake_case_ = 128 snake_case_ = randn_tensor((batch_size, temb_channels) , generator=A__ , device=A__ ) if include_res_hidden_states_tuple: snake_case_ = torch.manual_seed(1 ) snake_case_ = (randn_tensor(A__ , generator=A__ , device=A__ ),) if include_encoder_hidden_states: snake_case_ = floats_tensor((batch_size, 32, 32) ).to(A__ ) if include_skip_sample: snake_case_ = randn_tensor(((batch_size, 3) + sizes) , generator=A__ , device=A__ ) return dummy_input def a ( self ): snake_case_ = { """in_channels""": 32, """out_channels""": 32, """temb_channels""": 128, } if self.block_type == "up": snake_case_ = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) snake_case_ = self.dummy_input return init_dict, inputs_dict def a ( self , snake_case ): snake_case_ = self.prepare_init_args_and_inputs_for_common() snake_case_ = self.block_class(**A__ ) unet_block.to(A__ ) unet_block.eval() with torch.no_grad(): snake_case_ = unet_block(**A__ ) if isinstance(A__ , A__ ): snake_case_ = output[0] self.assertEqual(output.shape , self.output_shape ) snake_case_ = output[0, -1, -3:, -3:] snake_case_ = torch.tensor(A__ ).to(A__ ) assert torch_all_close(output_slice.flatten() , A__ , atol=5e-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def a ( self ): snake_case_ = self.prepare_init_args_and_inputs_for_common() snake_case_ = self.block_class(**A__ ) model.to(A__ ) model.train() snake_case_ = model(**A__ ) if isinstance(A__ , A__ ): snake_case_ = output[0] snake_case_ = torch.device(A__ ) snake_case_ = randn_tensor(output.shape , device=A__ ) snake_case_ = torch.nn.functional.mse_loss(A__ , A__ ) loss.backward()
720
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : int = logging.get_logger(__name__) _UpperCAmelCase : Any = { """facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""", # See all LeViT models at https://huggingface.co/models?filter=levit } class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : int = '''levit''' def __init__( self , snake_case=224 , snake_case=3 , snake_case=3 , snake_case=2 , snake_case=1 , snake_case=16 , snake_case=[128, 256, 384] , snake_case=[4, 8, 12] , snake_case=[4, 4, 4] , snake_case=[16, 16, 16] , snake_case=0 , snake_case=[2, 2, 2] , snake_case=[2, 2, 2] , snake_case=0.02 , **snake_case , ): super().__init__(**snake_case ) snake_case_ = image_size snake_case_ = num_channels snake_case_ = kernel_size snake_case_ = stride snake_case_ = padding snake_case_ = hidden_sizes snake_case_ = num_attention_heads snake_case_ = depths snake_case_ = key_dim snake_case_ = drop_path_rate snake_case_ = patch_size snake_case_ = attention_ratio snake_case_ = mlp_ratio snake_case_ = initializer_range snake_case_ = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Dict = version.parse('''1.11''' ) @property def a ( self ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def a ( self ): return 1e-4
108
0
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
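# Worked example: with power=2, the only way to write 13 as a sum of squares of
# unique natural numbers is 2**2 + 3**2 = 4 + 9, so solve(13, 2) == 1.
assert solve(13, 2) == 1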
181
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { '''configuration_blenderbot_small''': [ '''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotSmallConfig''', '''BlenderbotSmallOnnxConfig''', ], '''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ['''BlenderbotSmallTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ '''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotSmallForCausalLM''', '''BlenderbotSmallForConditionalGeneration''', '''BlenderbotSmallModel''', '''BlenderbotSmallPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ '''TFBlenderbotSmallForConditionalGeneration''', '''TFBlenderbotSmallModel''', '''TFBlenderbotSmallPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ '''FlaxBlenderbotSmallForConditionalGeneration''', '''FlaxBlenderbotSmallModel''', '''FlaxBlenderbotSmallPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
209
0
import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class a__ ( _snake_case , unittest.TestCase ): """simple docstring""" A__ : List[str] = CTRLTokenizer A__ : List[str] = False A__ : Tuple = False def __UpperCAmelCase ( self :Tuple ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>'] lowercase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) lowercase = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', ''] lowercase = {'unk_token': '<unk>'} lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowercase__ ) ) def __UpperCAmelCase ( self :Optional[int] , **lowercase__ :str ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def __UpperCAmelCase ( self :Tuple , lowercase__ :Optional[int] ): lowercase = 'adapt react readapt apt' lowercase = 'adapt react readapt apt' return input_text, output_text def __UpperCAmelCase ( self :Dict ): lowercase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase = 'adapt react readapt apt' lowercase = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split() lowercase = tokenizer.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) lowercase = tokens + [tokenizer.unk_token] lowercase = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
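# How the toy BPE above tokenizes "react": among the merges ["a p", "ap t</w>",
# "r e", "a d", "ad apt</w>"], only "r e" applies, giving "re a c t"; the
# unmerged non-final pieces are emitted with the "@@" continuation marker, hence
# "re@@ a@@ c@@ t". "adapt", by contrast, merges all the way ("a p" -> "ap",
# "ap t</w>" -> "apt</w>", "a d" -> "ad", "ad apt</w>" -> "adapt</w>") and
# survives as a single token, matching the expected output in the test.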
314
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) def __snake_case ( _UpperCAmelCase ): """simple docstring""" lowercase = OrderedDict() for key, value in state_dict.items(): if key.startswith('module.encoder' ): lowercase = key.replace('module.encoder' , 'glpn.encoder' ) if key.startswith('module.decoder' ): lowercase = key.replace('module.decoder' , 'decoder.stages' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowercase = key[key.find('patch_embed' ) + len('patch_embed' )] lowercase = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(_UpperCAmelCase )-1}""" ) if "norm" in key: lowercase = key.replace('norm' , 'layer_norm' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowercase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )] lowercase = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(_UpperCAmelCase )-1}""" ) if "layer_norm1" in key: lowercase = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowercase = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowercase = key[key.find('block' ) + len('block' )] lowercase = key.replace(f"""block{idx}""" , f"""block.{int(_UpperCAmelCase )-1}""" ) if "attn.q" in key: lowercase = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowercase = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowercase = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowercase = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowercase = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowercase = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowercase = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowercase = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowercase = key[key.find('linear_c' ) + len('linear_c' )] lowercase = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(_UpperCAmelCase )-1}""" ) if "bot_conv" in key: lowercase = key.replace('bot_conv' , '0.convolution' ) if "skip_conv1" in key: lowercase = key.replace('skip_conv1' , '1.convolution' ) if "skip_conv2" in key: lowercase = key.replace('skip_conv2' , '2.convolution' ) if "fusion1" in key: lowercase = key.replace('fusion1' , '1.fusion' ) if "fusion2" in key: lowercase = key.replace('fusion2' , '2.fusion' ) if "fusion3" in key: lowercase = key.replace('fusion3' , '3.fusion' ) if "fusion" in key and "conv" in key: lowercase = key.replace('conv' , 'convolutional_layer' ) if key.startswith('module.last_layer_depth' ): lowercase = key.replace('module.last_layer_depth' , 'head.head' ) lowercase = value return new_state_dict def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowercase = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowercase = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in 
that order) to the state dict lowercase = kv_weight[ : config.hidden_sizes[i], : ] lowercase = kv_bias[: config.hidden_sizes[i]] lowercase = kv_weight[ config.hidden_sizes[i] :, : ] lowercase = kv_bias[config.hidden_sizes[i] :] def __snake_case ( ): """simple docstring""" lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return image @torch.no_grad() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=None ): """simple docstring""" lowercase = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowercase = GLPNImageProcessor() # prepare image lowercase = prepare_img() lowercase = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values logger.info('Converting model...' ) # load original state dict lowercase = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) ) # rename keys lowercase = rename_keys(_UpperCAmelCase ) # key and value matrices need special treatment read_in_k_v(_UpperCAmelCase , _UpperCAmelCase ) # create HuggingFace model and load state dict lowercase = GLPNForDepthEstimation(_UpperCAmelCase ) model.load_state_dict(_UpperCAmelCase ) model.eval() # forward pass lowercase = model(_UpperCAmelCase ) lowercase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowercase = torch.tensor( [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] ) elif "kitti" in model_name: lowercase = torch.tensor( [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowercase = torch.Size([1, 4_80, 6_40] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , _UpperCAmelCase , atol=1e-4 ) print('Looks ok!' ) # finally, push to hub if required if push_to_hub: logger.info('Pushing model and image processor to the hub...' ) model.push_to_hub( repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) parser.add_argument( '''--model_name''', default='''glpn-kitti''', type=str, help='''Name of the model in case you\'re pushing to the hub.''', ) __magic_name__ = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
314
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) a_ = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""ConvNextFeatureExtractor"""] a_ = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
175
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a_ = logging.get_logger(__name__) a_ = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """t5""" lowerCAmelCase__ = ["""past_key_values"""] lowerCAmelCase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , __UpperCAmelCase=32128 , __UpperCAmelCase=512 , __UpperCAmelCase=64 , __UpperCAmelCase=2048 , __UpperCAmelCase=6 , __UpperCAmelCase=None , __UpperCAmelCase=8 , __UpperCAmelCase=32 , __UpperCAmelCase=128 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=1.0 , __UpperCAmelCase="relu" , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = d_model __lowerCamelCase = d_kv __lowerCamelCase = d_ff __lowerCamelCase = num_layers __lowerCamelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __lowerCamelCase = num_heads __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = relative_attention_max_distance __lowerCamelCase = dropout_rate __lowerCamelCase = layer_norm_epsilon __lowerCamelCase = initializer_factor __lowerCamelCase = feed_forward_proj __lowerCamelCase = use_cache __lowerCamelCase = self.feed_forward_proj.split('''-''' ) __lowerCamelCase = act_info[-1] __lowerCamelCase = act_info[0] == '''gated''' if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __lowerCamelCase = '''gelu_new''' super().__init__( pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , ) class __lowerCAmelCase ( lowerCAmelCase__ ): @property def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: __lowerCamelCase = '''past_encoder_sequence + sequence''' __lowerCamelCase = {0: '''batch'''} __lowerCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''} __lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) return common_inputs @property def lowerCamelCase ( self ): '''simple docstring''' return 13
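# Example of the feed_forward_proj parsing above: "gated-gelu" splits into
# ["gated", "gelu"], so the dense activation becomes "gelu" with the gated flag
# set, and the backwards-compatibility branch then remaps it to "gelu_new".
# A plain "relu" yields the "relu" activation with no gating.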
175
1
'''simple docstring'''


def method_1(boundary, steps):
    # "extended trapezoidal rule"
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
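# Worked example for the default setup in main(): f(x) = x**2 on [0.0, 1.0] with
# 10 steps gives h = 0.1 and
#   y = (h/2)*f(0) + h*(f(0.1) + ... + f(0.9)) + (h/2)*f(1)
#     = 0.05*0 + 0.1*2.85 + 0.05*1 = 0.335,
# slightly above the exact integral 1/3, as expected for a convex integrand.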
720
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex. Also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as its first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
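if __name__ == "__main__":
    # Usage sketch: an undirected triangle a-b, b-c, c-a.
    graph = GraphAdjacencyList[str](directed=False)
    graph.add_edge("a", "b").add_edge("b", "c").add_edge("c", "a")
    print(graph)  # {'a': ['b', 'c'], 'b': ['a', 'c'], 'c': ['b', 'a']}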
461
0
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
    """position_salaries.csv"""
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial() -> None:
    plt.scatter(X, y, color='''red''')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='''blue''')
    plt.title('''Truth or Bluff (Polynomial Regression)''')
    plt.xlabel('''Position level''')
    plt.ylabel('''Salary''')
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
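# Note on the prediction call above: for a fixed degree, PolynomialFeatures is
# effectively stateless, so re-running fit_transform on [[5.5]] yields the same
# degree-4 expansion that transform alone would; transform would be the more
# conventional choice at prediction time.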
92
def decimal_to_binary(num: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
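# Worked examples mirroring the built-in bin():
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(10) == "0b1010"
assert decimal_to_binary(-10) == "-0b1010"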
628
0
"""simple docstring""" import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def snake_case ( A__ ,A__=False ): UpperCAmelCase_ : List[str] = OmegaConf.load(A__ ) if display: print(yaml.dump(OmegaConf.to_container(A__ ) ) ) return config def snake_case ( A__ ,A__=None ,A__=None ): if conf_path is None: UpperCAmelCase_ : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml" UpperCAmelCase_ : int = load_config(A__ ,display=A__ ) UpperCAmelCase_ : Tuple = VQModel(**config.model.params ) if ckpt_path is None: UpperCAmelCase_ : Tuple = "./model_checkpoints/vqgan_only.pt" UpperCAmelCase_ : Optional[int] = torch.load(A__ ,map_location=A__ ) if ".ckpt" in ckpt_path: UpperCAmelCase_ : List[str] = sd["state_dict"] model.load_state_dict(A__ ,strict=A__ ) model.to(A__ ) del sd return model def snake_case ( A__ ,A__ ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = model.encode(A__ ) print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" ) UpperCAmelCase_ : Union[str, Any] = model.decode(A__ ) return xrec def snake_case ( A__ ,A__=False ): UpperCAmelCase_ , UpperCAmelCase_ : Tuple = string.rsplit("." ,1 ) if reload: UpperCAmelCase_ : Optional[int] = importlib.import_module(A__ ) importlib.reload(A__ ) return getattr(importlib.import_module(A__ ,package=A__ ) ,cls ) def snake_case ( A__ ): if "target" not in config: raise KeyError("Expected key `target` to instantiate." ) return get_obj_from_str(config["target"] )(**config.get("params" ,{} ) ) def snake_case ( A__ ,A__ ,A__=True ,A__=True ): UpperCAmelCase_ : Optional[int] = instantiate_from_config(A__ ) if sd is not None: model.load_state_dict(A__ ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def snake_case ( A__ ,A__ ,A__ ,A__ ): # load the specified checkpoint if ckpt: UpperCAmelCase_ : Union[str, Any] = torch.load(A__ ,map_location="cpu" ) UpperCAmelCase_ : List[str] = pl_sd["global_step"] print(F"""loaded model from global step {global_step}.""" ) else: UpperCAmelCase_ : Dict = {"state_dict": None} UpperCAmelCase_ : int = None UpperCAmelCase_ : Dict = load_model_from_config(config.model ,pl_sd["state_dict"] ,gpu=A__ ,eval_mode=A__ )["model"] return model, global_step
463
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def snake_case ( A__ ): UpperCAmelCase_ : Tuple = [] embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", F"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", F"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", F"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", F"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def snake_case ( A__ ,A__ ): UpperCAmelCase_ : List[Any] = [] attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) 
attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", 
F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def snake_case ( A__ ): UpperCAmelCase_ : Union[str, Any] = [] token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") ) return token def snake_case ( ): UpperCAmelCase_ : str = [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : int = "imagenet-1k-id2label.json" UpperCAmelCase_ : Union[str, Any] = 10_00 UpperCAmelCase_ : Any = "huggingface/label-files" UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(A__ ,A__ ,repo_type="dataset" ) ) ,"r" ) ) UpperCAmelCase_ : Optional[int] = {int(A__ ): v for k, v in idalabel.items()} UpperCAmelCase_ : Optional[int] = idalabel UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : str = CvtConfig(num_labels=A__ ,idalabel=A__ ,labelaid=A__ ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" ,1 )[-1][4:6] == "13": UpperCAmelCase_ : Optional[int] = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" ,1 )[-1][4:6] == "21": UpperCAmelCase_ : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: UpperCAmelCase_ : Optional[int] = [2, 2, 20] UpperCAmelCase_ : List[str] = [3, 12, 16] UpperCAmelCase_ : Optional[Any] = [1_92, 7_68, 10_24] UpperCAmelCase_ : Tuple = CvtForImageClassification(A__ ) UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) UpperCAmelCase_ : int = image_size UpperCAmelCase_ : List[str] = torch.load(A__ ,map_location=torch.device("cpu" ) ) UpperCAmelCase_ : int = OrderedDict() UpperCAmelCase_ : Optional[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: UpperCAmelCase_ : Optional[Any] = list_of_state_dict + cls_token(A__ ) UpperCAmelCase_ : Any = list_of_state_dict + embeddings(A__ ) for cnt in range(config.depth[idx] ): UpperCAmelCase_ : Dict = list_of_state_dict + attention(A__ ,A__ ) UpperCAmelCase_ : Union[str, Any] = list_of_state_dict + final() for gg in list_of_state_dict: print(A__ ) for i in range(len(A__ ) ): UpperCAmelCase_ : Any = original_weights[list_of_state_dict[i][1]] model.load_state_dict(A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', 
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Path to the CvT checkpoint (.pth) file to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) args = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
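# A minimal, hedged sketch of the key-renaming pattern the conversion script
# above relies on: collect (new_key, old_key) pairs, then copy tensors from the
# original checkpoint into an OrderedDict keyed by the new names. The helper
# name and arguments here are illustrative, not part of the script itself.
from collections import OrderedDict

def remap_state_dict(original_weights, rename_pairs):
    new_state = OrderedDict()
    for new_key, old_key in rename_pairs:
        new_state[new_key] = original_weights[old_key]  # tensor moves under its new name
    return new_state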
463
1
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __lowerCAmelCase : Optional[Any] = """.""" # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __lowerCAmelCase : Optional[Any] = [ """Assert""", """AssignVariableOp""", """EmptyTensorList""", """MergeV2Checkpoints""", """ReadVariableOp""", """ResourceGather""", """RestoreV2""", """SaveV2""", """ShardedFilename""", """StatefulPartitionedCall""", """StaticRegexFullMatch""", """VarHandleOp""", ] def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = SavedModel() lowerCAmelCase__ = [] with open(os.path.join(lowerCamelCase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f: lowerCAmelCase__ = json.load(lowerCamelCase__ )['''opsets'''] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(lowerCamelCase__ )] ) with open(lowerCamelCase__ , """rb""" ) as f: saved_model.ParseFromString(f.read() ) lowerCAmelCase__ = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want lowerCAmelCase__ = sorted(lowerCamelCase__ ) lowerCAmelCase__ = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(lowerCamelCase__ ) if strict and len(lowerCamelCase__ ) > 0: raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops ) elif len(lowerCamelCase__ ) > 0: print(f"""Found the following incompatible ops for the opset {opset}:""" ) print(*lowerCamelCase__ , sep="""\n""" ) else: print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" ) if __name__ == "__main__": __lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) __lowerCAmelCase : Optional[Any] = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
644
'''simple docstring''' from collections.abc import Iterable from typing import Any class UpperCAmelCase : """simple docstring""" def __init__( self , _snake_case = None ) -> Optional[int]: _UpperCamelCase : int = value _UpperCamelCase : Node | None = None # Added in order to delete a node easier _UpperCamelCase : Node | None = None _UpperCamelCase : Node | None = None def __repr__( self ) -> str: from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 ) class UpperCAmelCase : """simple docstring""" def __init__( self , _snake_case = None ) -> List[Any]: _UpperCamelCase : str = root def __str__( self ) -> str: return str(self.root ) def _lowercase ( self , _snake_case , _snake_case ) -> None: if new_children is not None: # reset its kids _UpperCamelCase : Union[str, Any] = node.parent if node.parent is not None: # reset its parent if self.is_right(_snake_case ): # If it is the right children _UpperCamelCase : str = new_children else: _UpperCamelCase : Any = new_children else: _UpperCamelCase : Any = new_children def _lowercase ( self , _snake_case ) -> bool: if node.parent and node.parent.right: return node == node.parent.right return False def _lowercase ( self ) -> bool: return self.root is None def _lowercase ( self , _snake_case ) -> None: _UpperCamelCase : List[Any] = Node(_snake_case ) # create a new Node if self.empty(): # if Tree is empty _UpperCamelCase : Optional[Any] = new_node # set its root else: # Tree is not empty _UpperCamelCase : int = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: _UpperCamelCase : Union[str, Any] = new_node # We insert the new node in a leaf break else: _UpperCamelCase : Union[str, Any] = parent_node.left else: if parent_node.right is None: _UpperCamelCase : Any = new_node break else: _UpperCamelCase : str = parent_node.right _UpperCamelCase : Any = parent_node def _lowercase ( self , *_snake_case ) -> None: for value in values: self.__insert(_snake_case ) def _lowercase ( self , _snake_case ) -> Node | None: if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: _UpperCamelCase : List[str] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: _UpperCamelCase : Optional[Any] = node.left if value < node.value else node.right return node def _lowercase ( self , _snake_case = None ) -> Node | None: if node is None: if self.root is None: return None _UpperCamelCase : Dict = self.root if not self.empty(): while node.right is not None: _UpperCamelCase : Tuple = node.right return node def _lowercase ( self , _snake_case = None ) -> Node | None: if node is None: _UpperCamelCase : Optional[Any] = self.root if self.root is None: return None if not self.empty(): _UpperCamelCase : Optional[int] = self.root while node.left is not None: _UpperCamelCase : List[str] = node.left return node def _lowercase ( self , _snake_case ) -> None: _UpperCamelCase : str = self.search(_snake_case ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_snake_case , _snake_case ) elif node.left is None: # Has only right children self.__reassign_nodes(_snake_case , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_snake_case , node.left ) else: _UpperCamelCase : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore _UpperCamelCase : int = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def _lowercase ( self , _snake_case ) -> Iterable: if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def _lowercase ( self , _snake_case=None ) -> Any: if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def _lowercase ( self , _snake_case , _snake_case ) -> None: if node: self.inorder(_snake_case , node.left ) arr.append(node.value ) self.inorder(_snake_case , node.right ) def _lowercase ( self , _snake_case , _snake_case ) -> int: _UpperCamelCase : list[int] = [] self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal return arr[k - 1] def snake_case__ ( UpperCamelCase ) -> list[Node]: _UpperCamelCase : int = [] if curr_node is not None: _UpperCamelCase : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def snake_case__ ( ) -> None: _UpperCamelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7) _UpperCamelCase : Tuple = BinarySearchTree() for i in testlist: t.insert(UpperCamelCase ) # Prints all the elements of the list in order traversal print(UpperCamelCase ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' ,t.get_max().value ) # type: ignore print('''Min Value: ''' ,t.get_min().value ) # type: ignore for i in testlist: t.remove(UpperCamelCase ) print(UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
683
0
"""simple docstring""" def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> str: """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError("both inputs must be positive integers" ) snake_case : Optional[Any] = str(bin(lowercase ) ) binary_number += "0" * shift_amount return binary_number def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> str: """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError("both inputs must be positive integers" ) snake_case : Any = str(bin(lowercase ) )[2:] if shift_amount >= len(lowercase ): return "0b0" snake_case : Optional[Any] = binary_number[: len(lowercase ) - shift_amount] return "0b" + shifted_binary_number def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> str: """simple docstring""" if number >= 0: # Get binary representation of positive number snake_case : Tuple = "0" + str(bin(lowercase ) ).strip("-" )[2:] else: # Get binary (2's complement) representation of negative number snake_case : List[str] = len(bin(lowercase )[3:] ) # Find 2's complement of number snake_case : Optional[Any] = bin(abs(lowercase ) - (1 << binary_number_length) )[3:] snake_case : str = ( "1" + "0" * (binary_number_length - len(lowercase )) + binary_number ) if shift_amount >= len(lowercase ): return "0b" + binary_number[0] * len(lowercase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(lowercase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
712
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _lowerCAmelCase ( unittest.TestCase ): @property def lowerCamelCase ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) snake_case : Tuple = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' snake_case : Optional[Any] = self.dummy_uncond_unet snake_case : str = PNDMScheduler() snake_case : List[Any] = PNDMPipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ ) pndm.to(UpperCamelCase__ ) pndm.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : str = torch.manual_seed(0 ) snake_case : Union[str, Any] = pndm(generator=UpperCamelCase__ , num_inference_steps=20 , output_type="numpy" ).images snake_case : Optional[int] = torch.manual_seed(0 ) snake_case : Any = pndm(generator=UpperCamelCase__ , num_inference_steps=20 , output_type="numpy" , return_dict=UpperCamelCase__ )[0] snake_case : Optional[Any] = image[0, -3:, -3:, -1] snake_case : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self ) -> Optional[int]: '''simple docstring''' snake_case : Dict = "google/ddpm-cifar10-32" snake_case : Optional[int] = UNetaDModel.from_pretrained(UpperCamelCase__ ) snake_case : List[str] = PNDMScheduler() snake_case : Union[str, Any] = PNDMPipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ ) pndm.to(UpperCamelCase__ ) pndm.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : int = torch.manual_seed(0 ) snake_case : str = pndm(generator=UpperCamelCase__ , output_type="numpy" ).images snake_case : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case : Optional[Any] = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
117
0
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _A : Dict = None _A : int = BloomTokenizerFast _A : str = BloomTokenizerFast _A : str = True _A : Dict = False _A : Union[str, Any] = "tokenizer_file" _A : Optional[int] = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def __A ( self : Dict ) -> Dict: '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE__ =BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self : Any ,**_UpperCamelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**a__ ) def __A ( self : List[str] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ =self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ =["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""] SCREAMING_SNAKE_CASE__ =[[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]] SCREAMING_SNAKE_CASE__ =tokenizer.batch_encode_plus(a__ )["""input_ids"""] self.assertListEqual(a__ ,a__ ) SCREAMING_SNAKE_CASE__ =tokenizer.batch_decode(a__ ) self.assertListEqual(a__ ,a__ ) def __A ( self : Any ,_UpperCamelCase : str=6 ) -> Tuple: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): SCREAMING_SNAKE_CASE__ =self.rust_tokenizer_class.from_pretrained(a__ ,**a__ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input SCREAMING_SNAKE_CASE__ ="""This is a simple input""" SCREAMING_SNAKE_CASE__ =["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE__ =("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE__ =[ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests try: tokenizer_r.encode(a__ ,max_length=a__ ) tokenizer_r.encode_plus(a__ ,max_length=a__ ) tokenizer_r.batch_encode_plus(a__ ,max_length=a__ ) tokenizer_r.encode(a__ ,max_length=a__ ) tokenizer_r.batch_encode_plus(a__ ,max_length=a__ ) except ValueError: self.fail("""Bloom Tokenizer should be able to deal with padding""" ) SCREAMING_SNAKE_CASE__ =None # Hotfixing padding = None self.assertRaises(a__ ,tokenizer_r.encode ,a__ ,max_length=a__ ,padding="""max_length""" ) # Simple input self.assertRaises(a__ ,tokenizer_r.encode_plus ,a__ ,max_length=a__ ,padding="""max_length""" ) # Simple input self.assertRaises( a__ ,tokenizer_r.batch_encode_plus ,a__ ,max_length=a__ ,padding="""max_length""" ,) # Pair input self.assertRaises(a__ ,tokenizer_r.encode ,a__ ,max_length=a__ ,padding="""max_length""" ) # Pair input self.assertRaises(a__ ,tokenizer_r.encode_plus ,a__ ,max_length=a__ ,padding="""max_length""" ) # Pair input self.assertRaises( a__ ,tokenizer_r.batch_encode_plus ,a__ ,max_length=a__ ,padding="""max_length""" ,) def __A ( self : str ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE__ =self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ =load_dataset("""xnli""" ,"""all_languages""" ,split="""test""" ,streaming=a__ ) SCREAMING_SNAKE_CASE__ 
=next(iter(a__ ) )["""premise"""] # pick up one data SCREAMING_SNAKE_CASE__ =list(sample_data.values() ) SCREAMING_SNAKE_CASE__ =list(map(tokenizer.encode ,a__ ) ) SCREAMING_SNAKE_CASE__ =[tokenizer.decode(a__ ,clean_up_tokenization_spaces=a__ ) for x in output_tokens] self.assertListEqual(a__ ,a__ ) def __A ( self : List[Any] ) -> Tuple: '''simple docstring''' self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
151
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _a : List[Any] = logging.get_logger(__name__) _a : Dict = { 'microsoft/unispeech-sat-base-100h-libri-ft': ( 'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Dict = "unispeech-sat" def __init__( self , a__=32 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.1 , a__=0.1 , a__=0.0_2 , a__=1e-5 , a__="group" , a__="gelu" , a__=(512, 512, 512, 512, 512, 512, 512) , a__=(5, 2, 2, 2, 2, 2, 2) , a__=(10, 3, 3, 3, 3, 2, 2) , a__=False , a__=128 , a__=16 , a__=False , a__=True , a__=0.0_5 , a__=10 , a__=2 , a__=0.0 , a__=10 , a__=0 , a__=320 , a__=2 , a__=0.1 , a__=100 , a__=256 , a__=256 , a__=0.1 , a__="mean" , a__=False , a__=False , a__=256 , a__=(512, 512, 512, 512, 1500) , a__=(5, 3, 3, 1, 1) , a__=(1, 2, 3, 1, 1) , a__=512 , a__=0 , a__=1 , a__=2 , a__=504 , **a__ , ): super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ ) _lowerCAmelCase : Any = hidden_size _lowerCAmelCase : int = feat_extract_norm _lowerCAmelCase : Any = feat_extract_activation _lowerCAmelCase : List[Any] = list(a__ ) _lowerCAmelCase : List[str] = list(a__ ) _lowerCAmelCase : Dict = list(a__ ) _lowerCAmelCase : str = conv_bias _lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings _lowerCAmelCase : Union[str, Any] = num_conv_pos_embedding_groups _lowerCAmelCase : int = len(self.conv_dim ) _lowerCAmelCase : Optional[Any] = num_hidden_layers _lowerCAmelCase : int = intermediate_size _lowerCAmelCase : Tuple = hidden_act _lowerCAmelCase : Dict = num_attention_heads _lowerCAmelCase : str = hidden_dropout _lowerCAmelCase : Any = attention_dropout _lowerCAmelCase : Optional[Any] = activation_dropout _lowerCAmelCase : Dict = feat_proj_dropout _lowerCAmelCase : List[str] = final_dropout _lowerCAmelCase : Union[str, Any] = layerdrop _lowerCAmelCase : Union[str, Any] = layer_norm_eps _lowerCAmelCase : str = initializer_range _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : str = num_clusters _lowerCAmelCase : Optional[Any] = do_stable_layer_norm _lowerCAmelCase : Optional[Any] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase : Tuple = apply_spec_augment _lowerCAmelCase : Optional[Any] = mask_time_prob _lowerCAmelCase : List[Any] = mask_time_length _lowerCAmelCase : List[Any] = mask_time_min_masks _lowerCAmelCase : Optional[Any] = mask_feature_prob _lowerCAmelCase : str = mask_feature_length _lowerCAmelCase : Any = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCAmelCase : int = num_codevectors_per_group _lowerCAmelCase : Tuple = num_codevector_groups _lowerCAmelCase : str = contrastive_logits_temperature _lowerCAmelCase : Optional[int] = feat_quantizer_dropout _lowerCAmelCase : Any = num_negatives _lowerCAmelCase : Optional[int] = codevector_dim _lowerCAmelCase : List[Any] = proj_codevector_dim _lowerCAmelCase : Optional[int] = diversity_loss_weight # ctc loss _lowerCAmelCase : Union[str, Any] = ctc_loss_reduction _lowerCAmelCase : List[str] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase : Optional[int] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase : int = list(a__ ) _lowerCAmelCase : List[Any] = list(a__ ) _lowerCAmelCase : Union[str, Any] = list(a__ ) _lowerCAmelCase : List[str] = xvector_output_dim @property def __A ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
213
0
import numpy as np


class Cell:
    # A cell in the grid world: it tracks its position, its parent on the
    # current path, and the A* costs g (path cost), h (heuristic) and f (total).
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        # Return the (up to 8) in-bounds neighbours of `cell`.
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
716
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are rolled into one letter in the classic 5x5 Playfair grid
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
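# Round-trip sketch for the cipher above; the ciphertext itself depends on the
# key, so we only assert that decoding recovers the prepared plaintext:
message = "Hide the gold in the tree stump"
ciphertext = encode(message, "playfair example")
assert decode(ciphertext, "playfair example") == prepare_input(message)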
473
0
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
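# Worked example for the ideal gas helpers above (PV = nRT, SI units):
# 2 mol at 100 K in a 5 m^3 vessel -> P = nRT / V = 2 * 100 * 8.314462 / 5
print(pressure_of_gas_system(2, 100, 5))  # ~332.58 Pa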
55
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
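# Worked example for the Casimir helper above: pass exactly one argument as 0
# and the function solves for it (values chosen for illustration):
print(casimir_force(force=0, area=4, distance=0.03))  # {'force': ~6.42e-21}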
418
0
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
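# Usage sketch for the exchange sort above (sorts in place and returns the list):
print(exchange_sort([5, 4, 3, 2, 1]))  # [1, 2, 3, 4, 5]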
715
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' SCREAMING_SNAKE_CASE = args.log_outputs SCREAMING_SNAKE_CASE = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] ) # load metric SCREAMING_SNAKE_CASE = load_metric("""wer""" ) SCREAMING_SNAKE_CASE = load_metric("""cer""" ) # compute metrics SCREAMING_SNAKE_CASE = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) SCREAMING_SNAKE_CASE = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) # print & log results SCREAMING_SNAKE_CASE = f"""WER: {wer_result}\nCER: {cer_result}""" print(SCREAMING_SNAKE_CASE ) with open(f"""{dataset_id}_eval_results.txt""" , """w""" ) as f: f.write(SCREAMING_SNAKE_CASE ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: SCREAMING_SNAKE_CASE = f"""log_{dataset_id}_predictions.txt""" SCREAMING_SNAKE_CASE = f"""log_{dataset_id}_targets.txt""" with open(SCREAMING_SNAKE_CASE , """w""" ) as p, open(SCREAMING_SNAKE_CASE , """w""" ) as t: # mapping function to write output def write_to_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): p.write(f"""{i}""" + """\n""" ) p.write(batch["""prediction"""] + """\n""" ) t.write(f"""{i}""" + """\n""" ) t.write(batch["""target"""] + """\n""" ) result.map(SCREAMING_SNAKE_CASE , with_indices=SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' SCREAMING_SNAKE_CASE = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training SCREAMING_SNAKE_CASE = re.sub(SCREAMING_SNAKE_CASE , """""" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
SCREAMING_SNAKE_CASE = ["""\n\n""", """\n""", """ """, """ """] for t in token_sequences_to_ignore: SCREAMING_SNAKE_CASE = """ """.join(text.split(SCREAMING_SNAKE_CASE ) ) return text def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' # load dataset SCREAMING_SNAKE_CASE = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=SCREAMING_SNAKE_CASE ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(args.model_id ) SCREAMING_SNAKE_CASE = feature_extractor.sampling_rate # resample audio SCREAMING_SNAKE_CASE = dataset.cast_column("""audio""" , Audio(sampling_rate=SCREAMING_SNAKE_CASE ) ) # load eval pipeline if args.device is None: SCREAMING_SNAKE_CASE = 0 if torch.cuda.is_available() else -1 SCREAMING_SNAKE_CASE = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE = asr( batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) SCREAMING_SNAKE_CASE = prediction["""text"""] SCREAMING_SNAKE_CASE = normalize_text(batch["""sentence"""] ) return batch # run inference on all examples SCREAMING_SNAKE_CASE = dataset.map(SCREAMING_SNAKE_CASE , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) __A : Tuple = parser.parse_args() main(args)
450
0
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[int] = [ ["""attention""", """attn"""], ["""encoder_attention""", """encoder_attn"""], ["""q_lin""", """q_proj"""], ["""k_lin""", """k_proj"""], ["""v_lin""", """v_proj"""], ["""out_lin""", """out_proj"""], ["""norm_embeddings""", """layernorm_embedding"""], ["""position_embeddings""", """embed_positions"""], ["""embeddings""", """embed_tokens"""], ["""ffn.lin""", """fc"""], ] def __lowercase ( snake_case ): """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __magic_name__ :Any = k.replace(snake_case, snake_case ) if k.startswith('''encoder''' ): __magic_name__ :str = k.replace('''.attn''', '''.self_attn''' ) __magic_name__ :Dict = k.replace('''norm1''', '''self_attn_layer_norm''' ) __magic_name__ :Dict = k.replace('''norm2''', '''final_layer_norm''' ) elif k.startswith('''decoder''' ): __magic_name__ :Any = k.replace('''norm1''', '''self_attn_layer_norm''' ) __magic_name__ :Dict = k.replace('''norm2''', '''encoder_attn_layer_norm''' ) __magic_name__ :Optional[Any] = k.replace('''norm3''', '''final_layer_norm''' ) return k def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :List[Any] = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: __magic_name__ :Dict = sd.pop(snake_case ) __magic_name__ :int = k.replace('''layernorm_embedding''', '''layer_norm''' ) assert new_k not in sd __magic_name__ :Optional[Any] = v SCREAMING_SNAKE_CASE__ : List[str] = ["""START"""] @torch.no_grad() def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Union[str, Any] = torch.load(snake_case, map_location='''cpu''' ) __magic_name__ :Tuple = model['''model'''] __magic_name__ :Dict = BlenderbotConfig.from_json_file(snake_case ) __magic_name__ :str = BlenderbotForConditionalGeneration(snake_case ) __magic_name__ :Tuple = m.model.state_dict().keys() __magic_name__ :Optional[int] = [] __magic_name__ :int = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue __magic_name__ :Dict = rename_state_dict_key(snake_case ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __magic_name__ :Optional[Any] = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case ) m.model.load_state_dict(snake_case, strict=snake_case ) m.half() m.save_pretrained(snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""") parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""") parser.add_argument( """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
0
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
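# Usage sketch for slowsort above; it sorts the sequence in place:
data = [9, 4, 6, 2, 8]
slowsort(data)
print(data)  # [2, 4, 6, 8, 9]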
257
0
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer _A : Optional[Any] = logging.get_logger(__name__) _A : List[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _A : Dict = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _A : Dict = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _A : Any = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } _A : Tuple = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_12, """facebook/dpr-ctx_encoder-multiset-base""": 5_12, } _A : int = { """facebook/dpr-question_encoder-single-nq-base""": 5_12, """facebook/dpr-question_encoder-multiset-base""": 5_12, } _A : Any = { """facebook/dpr-reader-single-nq-base""": 5_12, """facebook/dpr-reader-multiset-base""": 5_12, } _A : str = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } _A : Any = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } _A : List[Any] = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class a__ ( a_ ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = 
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase = DPRContextEncoderTokenizer class a__ ( a_ ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase = DPRQuestionEncoderTokenizer _A : Any = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) _A : Tuple = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) _A : Tuple = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). 
max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(a_ ) class a__ : def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ): if titles is None and texts is None: return super().__call__( _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , ) elif titles is None or texts is None: lowercase : List[Any] = titles if texts is None else texts return super().__call__( _a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , ) lowercase : str = titles if not isinstance(_a , _a ) else [titles] lowercase : Union[str, Any] = texts if not isinstance(_a , _a ) else [texts] lowercase : Optional[Any] = len(_a ) lowercase : List[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages assert len(_a ) == len( _a ), f"""There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.""" lowercase : Any = super().__call__(_a , _a , padding=_a , truncation=_a )["input_ids"] lowercase : Dict = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["input_ids"] lowercase : Union[str, Any] = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_a , _a ) ] } if return_attention_mask is not False: lowercase : Optional[int] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowercase : Union[str, Any] = attention_mask return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a ) def __magic_name__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ): lowercase : List[Any] = reader_input["input_ids"] lowercase , lowercase , lowercase : List[str] = reader_output[:3] lowercase : Optional[Any] = len(_a ) lowercase : Union[str, Any] = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ ) lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: lowercase : Dict = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowercase : Optional[Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == 
self.pad_token_id: lowercase : Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: lowercase : List[str] = len(_a ) lowercase : List[str] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(_a ) >= num_spans: break return nbest_spans_predictions[:num_spans] def __magic_name__ ( self , _a , _a , _a , _a , ): lowercase : Optional[int] = [] for start_index, start_score in enumerate(_a ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowercase : Tuple = sorted(_a , key=lambda _a : x[1] , reverse=_a ) lowercase : Any = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" lowercase : List[Any] = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_a ) == top_spans: break return chosen_span_intervals @add_end_docstrings(a_ ) class a__ ( a_, a_ ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase = ["""input_ids""", """attention_mask"""] __lowerCAmelCase = DPRReaderTokenizer
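# A minimal, hedged usage sketch of the reader tokenizer documented above
# (the checkpoint name and inputs are illustrative):
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is the capital of France?",
    titles="Paris",
    texts="Paris is the capital and most populous city of France.",
    padding=True,
    return_tensors="pt",
)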
518
"""simple docstring""" from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer _A : Dict = logging.get_logger(__name__) _A : Union[str, Any] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } _A : str = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } _A : Optional[int] = { """facebook/blenderbot_small-90M""": 5_12, } class a__ ( a_ ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = BlenderbotSmallTokenizer def __init__( self , _a=None , _a=None , _a="<|endoftext|>" , _a="<|endoftext|>" , _a="<|endoftext|>" , _a=False , _a=True , **_a , ): super().__init__( ByteLevelBPETokenizer( vocab=_a , merges=_a , add_prefix_space=_a , trim_offsets=_a , ) , bos_token=_a , eos_token=_a , unk_token=_a , **_a , ) lowercase : Dict = add_prefix_space def __magic_name__ ( self , _a , _a=None ): lowercase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __magic_name__ ( self , _a , _a = None ): lowercase : List[str] = [self.sep_token_id] lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
518
1
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase = 16 lowerCAmelCase = 32 def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : DatasetDict , snake_case_ : List[int] , snake_case_ : List[int] , snake_case_ : int = 1_6 ) ->List[str]: lowerCamelCase__ : Union[str, Any] =AutoTokenizer.from_pretrained('bert-base-cased' ) lowerCamelCase__ : Dict =DatasetDict( { 'train': dataset['train'].select(snake_case_ ), 'validation': dataset['train'].select(snake_case_ ), 'test': dataset['validation'], } ) def tokenize_function(snake_case_ : str ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase__ : Tuple =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase__ : List[str] =datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase__ : Dict =tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case_ : Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase__ : int =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase__ : str =1_6 elif accelerator.mixed_precision != "no": lowerCamelCase__ : Union[str, Any] =8 else: lowerCamelCase__ : int =None return tokenizer.pad( snake_case_ , padding='longest' , max_length=snake_case_ , pad_to_multiple_of=snake_case_ , return_tensors='pt' , ) # Instantiate dataloaders. 
lowerCamelCase__ : Optional[int] =DataLoader( tokenized_datasets['train'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) lowerCamelCase__ : int =DataLoader( tokenized_datasets['validation'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) lowerCamelCase__ : List[Any] =DataLoader( tokenized_datasets['test'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader, test_dataloader def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) ->Union[str, Any]: # New Code # lowerCamelCase__ : Optional[int] =[] # Download the dataset lowerCamelCase__ : Optional[int] =load_dataset('glue' , 'mrpc' ) # Create our splits lowerCamelCase__ : List[Any] =StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator lowerCamelCase__ : List[str] =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase__ : Any =config['lr'] lowerCamelCase__ : List[str] =int(config['num_epochs'] ) lowerCamelCase__ : List[Any] =int(config['seed'] ) lowerCamelCase__ : Tuple =int(config['batch_size'] ) lowerCamelCase__ : List[str] =evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation lowerCamelCase__ : int =1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCamelCase__ : Union[str, Any] =batch_size // MAX_GPU_BATCH_SIZE lowerCamelCase__ : Any =MAX_GPU_BATCH_SIZE set_seed(snake_case_ ) # New Code # # Create our folds: lowerCamelCase__ : Tuple =kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] ) lowerCamelCase__ : List[str] =[] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(snake_case_ ): lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] =get_fold_dataloaders( snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase__ : Optional[int] =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase__ : int =model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase__ : Tuple =AdamW(params=model.parameters() , lr=snake_case_ ) # Instantiate scheduler lowerCamelCase__ : str =get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=1_0_0 , num_training_steps=(len(snake_case_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowerCamelCase__ : Dict =model(**snake_case_ ) lowerCamelCase__ : str =outputs.loss lowerCamelCase__ : Optional[int] =loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase__ : List[Any] =model(**snake_case_ ) lowerCamelCase__ : Dict =outputs.logits.argmax(dim=-1 ) lowerCamelCase__ , lowerCamelCase__ : Tuple =accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) lowerCamelCase__ : Dict =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , snake_case_ ) # New Code # # We also run predictions on the test set at the very end lowerCamelCase__ : Any =[] for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase__ : Optional[Any] =model(**snake_case_ ) lowerCamelCase__ : List[Any] =outputs.logits lowerCamelCase__ , lowerCamelCase__ : str =accelerator.gather_for_metrics((predictions, batch['labels']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(snake_case_ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: lowerCamelCase__ : Dict =torch.cat(snake_case_ , dim=0 ) lowerCamelCase__ : str =torch.stack(snake_case_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) lowerCamelCase__ : int =metric.compute(predictions=snake_case_ , references=snake_case_ ) accelerator.print('Average test metrics from all folds:' , snake_case_ ) def lowerCAmelCase_ ( ) ->str: lowerCamelCase__ : Tuple =argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=snake_case_ , default=snake_case_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) # New Code # parser.add_argument('--num_folds' , type=snake_case_ , default=3 , help='The number of splits to perform across the dataset' ) lowerCamelCase__ : Tuple =parser.parse_args() lowerCamelCase__ : Optional[Any] ={'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
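# A hypothetical invocation of the cross-validation script above. The file name
# and the `accelerate` setup are assumptions; the flags come from the argparse
# definitions in main():
#
#   accelerate launch cross_validation.py --mixed_precision fp16 --num_folds 5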
"""simple docstring""" def lowerCAmelCase_ ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ) ->int: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =len(snake_case_ ), len(grid[0] ) if ( min(snake_case_ , snake_case_ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) lowerCamelCase__ : List[Any] =0 count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ ) count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ ) count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ ) count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('0.8.3'): raise Exception('requires gluonnlp == 0.8.3') if version.parse(mx.__version__) != version.parse('1.5.0'): raise Exception('requires mxnet == 1.5.0') logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = 'The Nymphenburg Palace is a beautiful palace in Munich!' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ): '''simple docstring''' __lowerCamelCase : Union[str, Any] ={ '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 1024, '''hidden_size''': 768, '''max_length''': 512, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 1024, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } __lowerCamelCase : Optional[Any] =bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __lowerCamelCase : Any =BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=SCREAMING_SNAKE_CASE , output_all_encodings=SCREAMING_SNAKE_CASE , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , SCREAMING_SNAKE_CASE ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __lowerCamelCase : Union[str, Any] ='''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab __lowerCamelCase : Dict =os.path.join(get_home_dir() , '''models''' ) __lowerCamelCase : Any =_load_vocab(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls=SCREAMING_SNAKE_CASE ) __lowerCamelCase : str =nlp.model.BERTModel( SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=SCREAMING_SNAKE_CASE , use_token_type_embed=SCREAMING_SNAKE_CASE , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=SCREAMING_SNAKE_CASE , use_decoder=SCREAMING_SNAKE_CASE , ) original_bort.load_parameters(SCREAMING_SNAKE_CASE , cast_dtype=SCREAMING_SNAKE_CASE , ignore_extra=SCREAMING_SNAKE_CASE ) __lowerCamelCase : List[Any] =original_bort._collect_params_with_prefix() # Build our config 🤗 __lowerCamelCase : Any ={ '''architectures''': ['''BertForMaskedLM'''], 
'''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(SCREAMING_SNAKE_CASE ), } __lowerCamelCase : Union[str, Any] =BertConfig.from_dict(SCREAMING_SNAKE_CASE ) __lowerCamelCase : Any =BertForMaskedLM(SCREAMING_SNAKE_CASE ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(SCREAMING_SNAKE_CASE : List[Any] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ): __lowerCamelCase : Dict =hf_param.shape __lowerCamelCase : Dict =to_torch(params[gluon_param] ) __lowerCamelCase : int 
=gluon_param.shape assert ( shape_hf == shape_gluon ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param __lowerCamelCase : Tuple =check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) __lowerCamelCase : str =check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) __lowerCamelCase : str =check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) __lowerCamelCase : Union[str, Any] =check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __lowerCamelCase : List[Any] =torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __lowerCamelCase : BertLayer =hf_bort_model.bert.encoder.layer[i] # self attention __lowerCamelCase : BertSelfAttention =layer.attention.self __lowerCamelCase : str =check_and_map_params( self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) __lowerCamelCase : Any =check_and_map_params( self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) __lowerCamelCase : Any =check_and_map_params( self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) __lowerCamelCase : Dict =check_and_map_params( self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) __lowerCamelCase : Dict =check_and_map_params( self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) __lowerCamelCase : Optional[Any] =check_and_map_params( self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output __lowerCamelCase : BertSelfOutput =layer.attention.output __lowerCamelCase : Any =check_and_map_params( self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' ) __lowerCamelCase : Dict =check_and_map_params( self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' ) __lowerCamelCase : int =check_and_map_params( self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' ) __lowerCamelCase : Any =check_and_map_params( self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate __lowerCamelCase : BertIntermediate =layer.intermediate __lowerCamelCase : Optional[int] =check_and_map_params( intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) __lowerCamelCase : str =check_and_map_params( intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output __lowerCamelCase : BertOutput =layer.output __lowerCamelCase : int =check_and_map_params( bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) __lowerCamelCase : Optional[Any] =check_and_map_params( bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) __lowerCamelCase : Tuple =check_and_map_params( bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) __lowerCamelCase : Optional[Any] =check_and_map_params( bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __lowerCamelCase : 
Union[str, Any] =RobertaTokenizer.from_pretrained('''roberta-base''' ) __lowerCamelCase : str =tokenizer.encode_plus(SCREAMING_SNAKE_CASE )['''input_ids'''] # Get gluon output __lowerCamelCase : Tuple =mx.nd.array([input_ids] ) __lowerCamelCase : Union[str, Any] =original_bort(inputs=SCREAMING_SNAKE_CASE , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(SCREAMING_SNAKE_CASE ) __lowerCamelCase : Dict =BertModel.from_pretrained(SCREAMING_SNAKE_CASE ) hf_bort_model.eval() __lowerCamelCase : List[Any] =tokenizer.encode_plus(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) __lowerCamelCase : List[Any] =hf_bort_model(**SCREAMING_SNAKE_CASE )[0] __lowerCamelCase : List[str] =output_gluon[0].asnumpy() __lowerCamelCase : Optional[Any] =output_hf[0].detach().numpy() __lowerCamelCase : List[Any] =np.max(np.abs(hf_layer - gluon_layer ) ).item() __lowerCamelCase : Tuple =np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _UpperCamelCase = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self :Tuple ): __lowerCamelCase : Any =Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __lowerCamelCase : Any =Vector() def __lowercase ( self :Dict ): __lowerCamelCase : Tuple =Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(__lowercase ) , '''(0,0,0,0,0,1)''' ) def __lowercase ( self :Dict ): __lowerCamelCase : int =Vector([1, 2, 3, 4] ) self.assertEqual(len(__lowercase ) , 4 ) def __lowercase ( self :Dict ): __lowerCamelCase : Optional[Any] =Vector([1, 2] ) __lowerCamelCase : Dict =Vector([1, 2, 3, 4, 5] ) __lowerCamelCase : List[Any] =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __lowerCamelCase : int =Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def __lowercase ( self :Optional[int] ): __lowerCamelCase : Tuple =Vector([1, 2, 3] ) __lowerCamelCase : Any =Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def __lowercase ( self :str ): __lowerCamelCase : Union[str, Any] =Vector([1, 2, 3] ) __lowerCamelCase : int =Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def __lowercase ( self :int ): __lowerCamelCase : List[Any] =Vector([1, 2, 3] ) __lowerCamelCase : List[Any] =Vector([2, -1, 4] ) # for test of dot product __lowerCamelCase : Any =Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' ) self.assertEqual((a * b) , 0 ) def __lowercase ( self :List[Any] ): self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 ) def __lowercase ( self :Union[str, Any] ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' ) def __lowercase ( self :List[Any] ): __lowerCamelCase : Any =Vector([1, 2, 3] ) __lowerCamelCase : Optional[int] =Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , __lowercase , __lowercase ) ) , '''(3,4,7)''' ) def __lowercase ( self :Dict ): __lowerCamelCase : List[Any] =Vector([1, 0, 0, 0, 0, 0] ) __lowerCamelCase : Optional[int] =x.copy() self.assertEqual(str(__lowercase ) , str(__lowercase ) ) def __lowercase ( self :int ): __lowerCamelCase : str =Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(__lowercase ) , '''(0,1,0)''' ) def __lowercase ( self :int ): __lowerCamelCase : Any =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(__lowercase ) ) def __lowercase ( self :int ): __lowerCamelCase : Tuple =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __lowerCamelCase : List[Any] =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(__lowercase , __lowercase ) ) def __lowercase ( self :Optional[int] ): __lowerCamelCase : Optional[Any] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __lowerCamelCase : Tuple =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(__lowercase , 
__lowercase ) ) def __lowercase ( self :Tuple ): __lowerCamelCase : Tuple =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def __lowercase ( self :int ): __lowerCamelCase : Union[str, Any] =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __lowerCamelCase : Tuple =Vector([1, 2, 3] ) self.assertEqual('''(14,32,50)''' , str(a * x ) ) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) ) def __lowercase ( self :Optional[Any] ): __lowerCamelCase : Optional[int] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(__lowercase ) ) def __lowercase ( self :str ): __lowerCamelCase : str =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def __lowercase ( self :Optional[int] ): __lowerCamelCase : List[str] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __lowerCamelCase : List[str] =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) ) def __lowercase ( self :Union[str, Any] ): __lowerCamelCase : int =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __lowerCamelCase : Optional[int] =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) ) def __lowercase ( self :Any ): self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
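# A short, hand-checkable illustration of the Vector/Matrix API exercised by
# the tests above (unittest.main() exits the process, so this is left as a
# comment; the expected values mirror the assertions).
#
#   a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
#   print(a * Vector([1, 2, 3]))   # (14,32,50) -- each row dotted with the vector
#   print(Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3).determinant())  # -5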
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place with the Fisher-Yates algorithm and return it.

    Walking the list from the end, each element is swapped with a uniformly
    chosen element at or before it, which yields an unbiased permutation.
    """
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
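# A reproducibility note: seeding `random` before the call fixes the resulting
# permutation, which is convenient in tests.
if __name__ == "__main__":
    random.seed(0)
    print(fisher_yates_shuffle(list(range(8))))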
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures _lowercase : str =logging.get_logger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ : '''simple docstring''' lowercase : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} ) lowercase : str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) lowercase : int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowercase : bool = field( default=lowerCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]: A : Tuple =self.task_name.lower() class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ): '''simple docstring''' lowercase : Optional[int] = "train" lowercase : int = "dev" lowercase : Union[str, Any] = "test" class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ): '''simple docstring''' lowercase : GlueDataTrainingArguments lowercase : str lowercase : List[InputFeatures] def __init__( self : str , SCREAMING_SNAKE_CASE__ : GlueDataTrainingArguments , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizerBase , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Split] = Split.train , SCREAMING_SNAKE_CASE__ : Optional[str] = None , ) -> List[Any]: warnings.warn( 'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , SCREAMING_SNAKE_CASE__ , ) A : Any =args A : Union[str, Any] =glue_processors[args.task_name]() A : Union[str, Any] =glue_output_modes[args.task_name] if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): try: A : Any =Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) # Load data features from cache or dataset file A : Tuple =os.path.join( cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) A : Optional[Any] =self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) A , A : str =label_list[2], label_list[1] A : Tuple =label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
A : int =cached_features_file + '.lock' with FileLock(SCREAMING_SNAKE_CASE__ ): if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not args.overwrite_cache: A : Optional[Any] =time.time() A : str =torch.load(SCREAMING_SNAKE_CASE__ ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(f'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: A : int =self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: A : Dict =self.processor.get_test_examples(args.data_dir ) else: A : Optional[Any] =self.processor.get_train_examples(args.data_dir ) if limit_length is not None: A : Optional[int] =examples[:limit_length] A : int =glue_convert_examples_to_features( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_length=args.max_seq_length , label_list=SCREAMING_SNAKE_CASE__ , output_mode=self.output_mode , ) A : List[Any] =time.time() torch.save(self.features , SCREAMING_SNAKE_CASE__ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self : Optional[Any] ) -> Union[str, Any]: return len(self.features ) def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> InputFeatures: return self.features[i] def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]: return self.label_list
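# A hedged usage sketch for the dataset class above. The class and argument
# names follow the (deprecated) transformers GLUE utilities rather than the
# obfuscated names in this dump, and the data_dir path is a placeholder.
#
#   from transformers import AutoTokenizer
#
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)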
"""simple docstring""" def lowerCamelCase_ ( ) ->str: """simple docstring""" for n in range(1 , 1_00_00_00 ): yield n * (n + 1) // 2 def lowerCamelCase_ ( UpperCAmelCase_ ) ->Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = 1 __UpperCAmelCase : Any = 2 while i * i <= n: __UpperCAmelCase : Tuple = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def lowerCamelCase_ ( ) ->Optional[int]: """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(UpperCAmelCase_ ) > 5_00 ) if __name__ == "__main__": print(solution())
"""simple docstring""" def lowerCamelCase_ ( UpperCAmelCase_ = 10_00 ) ->int: """simple docstring""" return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: A = None A = logging.get_logger(__name__) A = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} A = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } A = { 'facebook/mbart-large-en-ro': 1024, 'facebook/mbart-large-cc25': 1024, } # fmt: off A = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class __snake_case ( a__): _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = ['''input_ids''', '''attention_mask'''] _lowerCAmelCase = MBartTokenizer _lowerCAmelCase = [] _lowerCAmelCase = [] def __init__( self, A=None, A=None, A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A="<mask>", A=None, A=None, A=None, **A, ): """simple docstring""" lowerCamelCase : str = AddedToken(A, lstrip=A, rstrip=A ) if isinstance(A, A ) else mask_token super().__init__( vocab_file=A, tokenizer_file=A, bos_token=A, eos_token=A, sep_token=A, cls_token=A, unk_token=A, pad_token=A, mask_token=A, src_lang=A, tgt_lang=A, additional_special_tokens=A, **A, ) lowerCamelCase : List[Any] = vocab_file lowerCamelCase : Union[str, Any] = False if not self.vocab_file else True lowerCamelCase : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) lowerCamelCase : Any = { lang_code: self.convert_tokens_to_ids(A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowerCamelCase : List[Any] = src_lang if src_lang is not None else 'en_XX' lowerCamelCase : Optional[Any] = self.convert_tokens_to_ids(self._src_lang ) lowerCamelCase : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCAmelCase_ ( self ): """simple docstring""" return self._src_lang @src_lang.setter def UpperCAmelCase_ ( self, A ): """simple docstring""" lowerCamelCase : Union[str, Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase_ ( self, A, A = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase_ ( self, A, A = None ): """simple docstring""" lowerCamelCase : Dict = [self.sep_token_id] lowerCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self, A, A, A, A, **A ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) lowerCamelCase : List[Any] = src_lang lowerCamelCase : int = self(A, add_special_tokens=A, return_tensors=A, **A ) lowerCamelCase : Optional[Any] = self.convert_tokens_to_ids(A ) lowerCamelCase : Dict = tgt_lang_id return inputs def UpperCAmelCase_ ( self, A, A = "en_XX", A = None, A = "ro_RO", **A, ): """simple docstring""" lowerCamelCase : Tuple = src_lang lowerCamelCase : Union[str, Any] = tgt_lang return super().prepare_seqaseq_batch(A, A, **A ) def UpperCAmelCase_ ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase_ ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase_ ( self, A ): """simple docstring""" lowerCamelCase : Tuple = self.convert_tokens_to_ids(A ) lowerCamelCase : Tuple = [] lowerCamelCase : List[Any] = [self.eos_token_id, self.cur_lang_code] lowerCamelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCamelCase : int = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), ) def UpperCAmelCase_ ( self, A ): """simple docstring""" lowerCamelCase : Optional[Any] = self.convert_tokens_to_ids(A ) lowerCamelCase : str = [] lowerCamelCase : str = [self.eos_token_id, self.cur_lang_code] lowerCamelCase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCamelCase : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), ) 
def UpperCAmelCase_ ( self, A, A = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(A ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return lowerCamelCase : Any = os.path.join( A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ): copyfile(self.vocab_file, A ) return (out_vocab_file,)
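# A minimal translation-tokenization sketch, assuming the
# `facebook/mbart-large-en-ro` checkpoint listed in the vocab map above.
#
#   from transformers import MBartTokenizerFast
#
#   tok = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # input_ids end with </s> followed by the en_XX language-code token, per
#   # the source-language special-token handling above.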
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
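# A hypothetical consumer of the fixture above: test classes set a `framework`
# attribute and pull in the fixture, which attaches `env` to the class. The
# class name here is invented for illustration.
#
#   @pytest.mark.usefixtures("sm_env")
#   class TestSingleNode:
#       framework = "pytorch"
#
#       def test_job_name(self):
#           assert self.env.base_job_name == "pytorch-transfromers-test"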
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
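# Example output (the file names are hypothetical): run from a repository root
# this might print
#
#   2 files contain space characters:
#   data structures/heap.py
#   graph algorithms/bfs.py
#
# and then exit with status 2 via sys.exit(bad_files).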
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowercase : List[Any] ={ """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } _lowercase : Optional[Any] =logging.get_logger(__name__) class UpperCamelCase_ ( __a ): _a : int = """mask2former""" _a : Optional[Any] = ["""swin"""] _a : Any = {"""hidden_size""": """hidden_dim"""} def __init__( self : int , lowerCamelCase : Optional[int] = None , lowerCamelCase : Union[str, Any] = 2_56 , lowerCamelCase : List[Any] = 2_56 , lowerCamelCase : Dict = 2_56 , lowerCamelCase : List[str] = 10_24 , lowerCamelCase : int = "relu" , lowerCamelCase : Union[str, Any] = 6 , lowerCamelCase : Tuple = 10 , lowerCamelCase : Dict = 8 , lowerCamelCase : Any = 0.0 , lowerCamelCase : List[Any] = 20_48 , lowerCamelCase : Union[str, Any] = False , lowerCamelCase : List[str] = False , lowerCamelCase : List[str] = 4 , lowerCamelCase : Dict = 2_55 , lowerCamelCase : Dict = 1_00 , lowerCamelCase : Optional[Any] = 0.1 , lowerCamelCase : Optional[int] = 2.0 , lowerCamelCase : List[str] = 5.0 , lowerCamelCase : Dict = 5.0 , lowerCamelCase : Tuple = 1_25_44 , lowerCamelCase : Any = 3.0 , lowerCamelCase : Optional[Any] = 0.75 , lowerCamelCase : str = 0.02 , lowerCamelCase : Union[str, Any] = 1.0 , lowerCamelCase : List[str] = True , lowerCamelCase : List[Any] = [4, 8, 16, 32] , lowerCamelCase : int = None , **lowerCamelCase : Dict , ): if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) lowerCamelCase_ : Dict = CONFIG_MAPPING['swin']( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_lowerCamelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(_lowerCamelCase , _lowerCamelCase ): lowerCamelCase_ : Dict = backbone_config.pop('model_type' ) lowerCamelCase_ : int = CONFIG_MAPPING[backbone_model_type] lowerCamelCase_ : List[str] = config_class.from_dict(_lowerCamelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
" F"Supported model types: {','.join(self.backbones_supported )}" ) lowerCamelCase_ : Any = backbone_config lowerCamelCase_ : Any = feature_size lowerCamelCase_ : Dict = mask_feature_size lowerCamelCase_ : Union[str, Any] = hidden_dim lowerCamelCase_ : Tuple = encoder_feedforward_dim lowerCamelCase_ : Optional[int] = activation_function lowerCamelCase_ : str = encoder_layers lowerCamelCase_ : int = decoder_layers lowerCamelCase_ : str = num_attention_heads lowerCamelCase_ : str = dropout lowerCamelCase_ : List[Any] = dim_feedforward lowerCamelCase_ : Tuple = pre_norm lowerCamelCase_ : str = enforce_input_projection lowerCamelCase_ : Tuple = common_stride lowerCamelCase_ : int = ignore_value lowerCamelCase_ : int = num_queries lowerCamelCase_ : int = no_object_weight lowerCamelCase_ : Dict = class_weight lowerCamelCase_ : Tuple = mask_weight lowerCamelCase_ : str = dice_weight lowerCamelCase_ : str = train_num_points lowerCamelCase_ : Tuple = oversample_ratio lowerCamelCase_ : int = importance_sample_ratio lowerCamelCase_ : Union[str, Any] = init_std lowerCamelCase_ : Dict = init_xavier_std lowerCamelCase_ : Optional[Any] = use_auxiliary_loss lowerCamelCase_ : List[str] = feature_strides lowerCamelCase_ : Tuple = output_auxiliary_logits lowerCamelCase_ : Any = decoder_layers super().__init__(**_lowerCamelCase ) @classmethod def __a ( cls : Union[str, Any] , lowerCamelCase : List[str] , **lowerCamelCase : int ): return cls( backbone_config=_lowerCamelCase , **_lowerCamelCase , ) def __a ( self : Optional[Any] ): lowerCamelCase_ : Tuple = copy.deepcopy(self.__dict__ ) lowerCamelCase_ : Tuple = self.backbone_config.to_dict() lowerCamelCase_ : List[Any] = self.__class__.model_type return output
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''', '''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''', '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''', '''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''', '''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''', '''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''', '''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''', '''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''', '''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''', '''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''', '''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''', '''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''', } class __a ( __a ): '''simple docstring''' _lowerCamelCase : List[Any] = """codegen""" _lowerCamelCase : List[str] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , _lowerCamelCase=50_400 , _lowerCamelCase=2_048 , _lowerCamelCase=2_048 , _lowerCamelCase=4_096 , _lowerCamelCase=28 , _lowerCamelCase=16 , _lowerCamelCase=64 , _lowerCamelCase=None , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.02 , _lowerCamelCase=True , _lowerCamelCase=50_256 , _lowerCamelCase=50_256 , _lowerCamelCase=False , **_lowerCamelCase , ) -> Union[str, Any]: '''simple docstring''' __lowercase = vocab_size __lowercase = n_ctx __lowercase = n_positions __lowercase = n_embd __lowercase = n_layer __lowercase = n_head __lowercase = n_inner __lowercase = rotary_dim __lowercase = activation_function __lowercase = resid_pdrop __lowercase = embd_pdrop __lowercase = attn_pdrop __lowercase = layer_norm_epsilon __lowercase = initializer_range __lowercase = use_cache __lowercase = bos_token_id __lowercase = eos_token_id super().__init__( bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase ) class __a ( __a ): '''simple docstring''' def __init__( self , _lowerCamelCase , _lowerCamelCase = "default" , _lowerCamelCase = None , _lowerCamelCase = False , ) -> str: '''simple docstring''' super().__init__(_lowerCamelCase , task=_lowerCamelCase , patching_specs=_lowerCamelCase , use_past=_lowerCamelCase ) if not getattr(self._config , "pad_token_id" , _lowerCamelCase ): # TODO: how to do that better? 
__lowercase = 0 @property def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __lowercase = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCamelCase , direction="inputs" ) __lowercase = {0: "batch", 1: "past_sequence + sequence"} else: __lowercase = {0: "batch", 1: "sequence"} return common_inputs @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return self._config.n_layer @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return self._config.n_head def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]: '''simple docstring''' __lowercase = super(_lowerCamelCase , self ).generate_dummy_inputs( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) # We need to order the input in the way they appears in the forward() __lowercase = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch __lowercase , __lowercase = common_inputs["input_ids"].shape # Not using the same length for past_key_values __lowercase = seqlen + 2 __lowercase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowercase = [ (torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(self.num_layers ) ] __lowercase = common_inputs["attention_mask"] if self.use_past: __lowercase = ordered_inputs["attention_mask"].dtype __lowercase = torch.cat( [ordered_inputs["attention_mask"], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 ) return ordered_inputs @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return 13
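# A minimal sketch, assuming the config above is transformers' `CodeGenConfig`
# (its declared model_type is "codegen"); the attribute_map aliases
# num_hidden_layers -> n_layer, so both names read the same value.
if __name__ == "__main__":
    from transformers import CodeGenConfig

    config = CodeGenConfig(n_layer=4, n_head=4, n_embd=256)
    print(config.num_hidden_layers)  # 4, via the attribute_map alias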
'''simple docstring''' import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Dict = DownBlockaD # noqa F405 __UpperCamelCase : Tuple = 'down' def _snake_case (self ): __lowerCAmelCase = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[str] = ResnetDownsampleBlockaD # noqa F405 __UpperCamelCase : Any = 'down' def _snake_case (self ): __lowerCAmelCase = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Union[str, Any] = AttnDownBlockaD # noqa F405 __UpperCamelCase : List[str] = 'down' def _snake_case (self ): __lowerCAmelCase = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Union[str, Any] = CrossAttnDownBlockaD # noqa F405 __UpperCamelCase : Optional[int] = 'down' def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[Any] = SimpleCrossAttnDownBlockaD # noqa F405 __UpperCamelCase : List[str] = 'down' @property def _snake_case (self ): return super().get_dummy_input(include_encoder_hidden_states=__lowercase ) def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def _snake_case (self ): __lowerCAmelCase = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Dict = SkipDownBlockaD # noqa F405 __UpperCamelCase : Dict = 'down' @property def _snake_case (self ): return super().get_dummy_input(include_skip_sample=__lowercase ) def _snake_case (self ): __lowerCAmelCase = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = AttnSkipDownBlockaD # noqa F405 __UpperCamelCase : int = 'down' @property def _snake_case (self ): return super().get_dummy_input(include_skip_sample=__lowercase ) def _snake_case (self ): __lowerCAmelCase = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = DownEncoderBlockaD # noqa F405 __UpperCamelCase : str = 'down' @property def _snake_case (self ): return 
super().get_dummy_input(include_temb=__lowercase ) def _snake_case (self ): __lowerCAmelCase = { '''in_channels''': 32, '''out_channels''': 32, } __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405 __UpperCamelCase : Union[str, Any] = 'down' @property def _snake_case (self ): return super().get_dummy_input(include_temb=__lowercase ) def _snake_case (self ): __lowerCAmelCase = { '''in_channels''': 32, '''out_channels''': 32, } __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[Any] = UNetMidBlockaD # noqa F405 __UpperCamelCase : Tuple = 'mid' def _snake_case (self ): __lowerCAmelCase = { '''in_channels''': 32, '''temb_channels''': 1_28, } __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = UNetMidBlockaDCrossAttn # noqa F405 __UpperCamelCase : Tuple = 'mid' def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Dict = UNetMidBlockaDSimpleCrossAttn # noqa F405 __UpperCamelCase : List[str] = 'mid' @property def _snake_case (self ): return super().get_dummy_input(include_encoder_hidden_states=__lowercase ) def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[int] = UpBlockaD # noqa F405 __UpperCamelCase : Optional[Any] = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def _snake_case (self ): __lowerCAmelCase = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[Any] = ResnetUpsampleBlockaD # noqa F405 __UpperCamelCase : Tuple = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def _snake_case (self ): __lowerCAmelCase = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4] super().test_output(__lowercase ) class a__ ( 
__A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Tuple = CrossAttnUpBlockaD # noqa F405 __UpperCamelCase : Union[str, Any] = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[Any] = SimpleCrossAttnUpBlockaD # noqa F405 __UpperCamelCase : str = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase , include_encoder_hidden_states=__lowercase ) def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : int = AttnUpBlockaD # noqa F405 __UpperCamelCase : Optional[int] = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def _snake_case (self ): __lowerCAmelCase = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = SkipUpBlockaD # noqa F405 __UpperCamelCase : Tuple = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def _snake_case (self ): __lowerCAmelCase = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Dict = AttnSkipUpBlockaD # noqa F405 __UpperCamelCase : int = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def _snake_case (self ): __lowerCAmelCase = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : int = UpDecoderBlockaD # noqa F405 __UpperCamelCase : List[str] = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_temb=__lowercase ) def _snake_case (self ): __lowerCAmelCase = {'''in_channels''': 32, '''out_channels''': 32} __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7] super().test_output(__lowercase ) class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Union[str, Any] = AttnUpDecoderBlockaD # noqa F405 __UpperCamelCase : List[str] = 'up' @property def _snake_case (self ): return super().get_dummy_input(include_temb=__lowercase ) def _snake_case (self ): 
__lowerCAmelCase = {'''in_channels''': 32, '''out_channels''': 32} __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case (self ): __lowerCAmelCase = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8] super().test_output(__lowercase )
474
from torch import nn


def get_activation(act_fn):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
474
1
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _a : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(__a ) class __A ( __a ): def __init__( self , *a__ , **a__ ): super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def __A ( self , a__=None ): _lowerCAmelCase : Any = {} if top_k is not None: _lowerCAmelCase : Tuple = top_k return {}, {}, postprocess_params def __call__( self , a__ , **a__ ): return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ ) def __A ( self , a__ ): _lowerCAmelCase : Union[str, Any] = load_image(lowerCAmelCase__ ) _lowerCAmelCase : Union[str, Any] = self.image_processor(images=lowerCAmelCase__ , return_tensors=self.framework ) return model_inputs def __A ( self , a__ ): _lowerCAmelCase : Any = self.model(**lowerCAmelCase__ ) return model_outputs def __A ( self , a__ , a__=5 ): if top_k > self.model.config.num_labels: _lowerCAmelCase : Dict = self.model.config.num_labels if self.framework == "pt": _lowerCAmelCase : Tuple = model_outputs.logits.softmax(-1 )[0] _lowerCAmelCase : Dict = probs.topk(lowerCAmelCase__ ) elif self.framework == "tf": _lowerCAmelCase : Dict = stable_softmax(model_outputs.logits , axis=-1 )[0] _lowerCAmelCase : Any = tf.math.top_k(lowerCAmelCase__ , k=lowerCAmelCase__ ) _lowerCAmelCase : str = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F"Unsupported framework: {self.framework}" ) _lowerCAmelCase : str = scores.tolist() _lowerCAmelCase : Tuple = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
213
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
414
0
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, keeping at most digit_amount digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
218
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
218
1
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # 1 is the only member of the chain ending in 1
CHAINS[57] = False  # 58 belongs to the chain ending in 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    # chains that arrive at 89 are stored as False
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(f"{solution() = }")
33
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
0
def matching_min_vertex_cover(graph: dict) -> set:
    """Return a set of vertices that covers every edge of the graph."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
543
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
543
1
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Tuple = old_name if "patch_embed" in old_name: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[int] = old_name.split('''.''' ) if layer == "0": __UpperCamelCase :int = old_name.replace('''0''' , '''convolution1''' ) elif layer == "1": __UpperCamelCase :Dict = old_name.replace('''1''' , '''batchnorm_before''' ) elif layer == "3": __UpperCamelCase :Any = old_name.replace('''3''' , '''convolution2''' ) else: __UpperCamelCase :List[str] = old_name.replace('''4''' , '''batchnorm_after''' ) if "network" in old_name and re.search(R'''\d\.\d''' , SCREAMING_SNAKE_CASE ): __UpperCamelCase :Optional[int] = R'''\b\d{2}\b''' if bool(re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ): __UpperCamelCase :Tuple = re.search(R'''\d\.\d\d.''' , SCREAMING_SNAKE_CASE ).group() else: __UpperCamelCase :Any = re.search(R'''\d\.\d.''' , SCREAMING_SNAKE_CASE ).group() if int(match[0] ) < 6: __UpperCamelCase :List[str] = old_name.replace(SCREAMING_SNAKE_CASE , '''''' ) __UpperCamelCase :List[Any] = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] ) __UpperCamelCase :List[Any] = '''intermediate_stages.''' + trimmed_name else: __UpperCamelCase :List[str] = old_name.replace(SCREAMING_SNAKE_CASE , '''''' ) if int(match[2] ) < num_meta4D_last_stage: __UpperCamelCase :List[Any] = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] ) else: __UpperCamelCase :Tuple = str(int(match[2] ) - num_meta4D_last_stage ) __UpperCamelCase :Optional[Any] = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index ) if "norm1" in old_name: __UpperCamelCase :Optional[Any] = trimmed_name.replace('''norm1''' , '''layernorm1''' ) elif "norm2" in old_name: __UpperCamelCase :Any = trimmed_name.replace('''norm2''' , '''layernorm2''' ) elif "fc1" in old_name: __UpperCamelCase :int = trimmed_name.replace('''fc1''' , '''linear_in''' ) elif "fc2" in old_name: __UpperCamelCase :int = trimmed_name.replace('''fc2''' , '''linear_out''' ) __UpperCamelCase :Any = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(R'''.\d.''' , SCREAMING_SNAKE_CASE ): __UpperCamelCase :Optional[int] = old_name.replace('''network''' , '''intermediate_stages''' ) if "fc" in new_name: __UpperCamelCase :int = new_name.replace('''fc''' , '''convolution''' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): __UpperCamelCase :Any = new_name.replace('''norm1''' , '''batchnorm_before''' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): __UpperCamelCase :str = new_name.replace('''norm2''' , '''batchnorm_after''' ) if "proj" in new_name: __UpperCamelCase :List[str] = new_name.replace('''proj''' , '''projection''' ) if "dist_head" in new_name: __UpperCamelCase :List[str] = new_name.replace('''dist_head''' , '''distillation_classifier''' ) elif "head" in new_name: __UpperCamelCase :Dict = new_name.replace('''head''' , '''classifier''' ) elif "patch_embed" in new_name: __UpperCamelCase :Tuple = 
'''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __UpperCamelCase :Union[str, Any] = new_name.replace('''norm''' , '''layernorm''' ) __UpperCamelCase :Dict = '''efficientformer.''' + new_name else: __UpperCamelCase :Optional[int] = '''efficientformer.encoder.''' + new_name return new_name def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' for key in checkpoint.copy().keys(): __UpperCamelCase :str = checkpoint.pop(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Dict = val return checkpoint def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __UpperCamelCase :int = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return image def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Dict = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model'''] __UpperCamelCase :int = EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[str] = EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[Any] = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] ) __UpperCamelCase :Dict = config.depths[-1] - config.num_metaad_blocks + 1 __UpperCamelCase :int = convert_torch_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) model.eval() __UpperCamelCase :Tuple = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image __UpperCamelCase :Tuple = prepare_img() __UpperCamelCase :Dict = 256 __UpperCamelCase :List[Any] = 224 __UpperCamelCase :List[Any] = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , ) __UpperCamelCase :Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values # original processing pipeline __UpperCamelCase :Any = Compose( [ Resize(SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings['''bicubic'''] ), CenterCrop(SCREAMING_SNAKE_CASE ), ToTensor(), Normalize(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), ] ) __UpperCamelCase :Any = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Dict = outputs.logits __UpperCamelCase :List[Any] = (1, 1_000) if "l1" in model_name: __UpperCamelCase :str = torch.Tensor( [-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] ) assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: __UpperCamelCase :int = torch.Tensor( [-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] ) assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: __UpperCamelCase :List[Any] = torch.Tensor( [-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] ) assert logits.shape == expected_shape else: raise ValueError( f"""Unknown model checkpoint: 
{checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) print(f"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print('''Pushing model to the hub...''' ) model.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE , ) processor.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to EfficientFormer pytorch checkpoint.''', ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for EfficientFormer model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) parser.set_defaults(push_to_hub=True) __lowercase = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
167
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __lowercase = logging.getLogger(__name__) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) a__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. a__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) a__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) a__ : int = field( default=1_2_8 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. 
Use""" ''' --overwrite_output_dir to overcome.''' ) __UpperCamelCase :Union[str, Any] = import_module('''tasks''' ) try: __UpperCamelCase :int = getattr(SCREAMING_SNAKE_CASE , model_args.task_type ) __UpperCamelCase :TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __UpperCamelCase :Optional[Any] = token_classification_task.get_labels(data_args.labels ) __UpperCamelCase :Dict[int, str] = dict(enumerate(SCREAMING_SNAKE_CASE ) ) __UpperCamelCase :Tuple = len(SCREAMING_SNAKE_CASE ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , ) __UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __UpperCamelCase :str = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # Get datasets __UpperCamelCase :Dict = ( TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __UpperCamelCase :Optional[int] = ( TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple[List[int], List[int]]: __UpperCamelCase :Optional[int] = np.argmax(SCREAMING_SNAKE_CASE , axis=2 ) __UpperCamelCase , __UpperCamelCase :int = preds.shape __UpperCamelCase :str = [[] for _ in range(SCREAMING_SNAKE_CASE )] __UpperCamelCase :int = [[] for _ in range(SCREAMING_SNAKE_CASE )] for i in range(SCREAMING_SNAKE_CASE ): for j in range(SCREAMING_SNAKE_CASE ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(SCREAMING_SNAKE_CASE ) -> Dict: __UpperCamelCase , __UpperCamelCase :Optional[Any] = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "precision": precision_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "recall": recall_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "f1": fa_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), } # Data collator __UpperCamelCase :Tuple = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __UpperCamelCase :Union[str, Any] = Trainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCamelCase :int = {} if training_args.do_eval: logger.info('''*** Evaluate 
***''' ) __UpperCamelCase :Union[str, Any] = trainer.evaluate() __UpperCamelCase :List[str] = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) writer.write('''%s = %s\n''' % (key, value) ) results.update(SCREAMING_SNAKE_CASE ) # Predict if training_args.do_predict: __UpperCamelCase :Any = TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = trainer.predict(SCREAMING_SNAKE_CASE ) __UpperCamelCase , __UpperCamelCase :Tuple = align_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __UpperCamelCase :Tuple = os.path.join(training_args.output_dir , '''test_results.txt''' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer: for key, value in metrics.items(): logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) writer.write('''%s = %s\n''' % (key, value) ) # Save predictions __UpperCamelCase :List[str] = os.path.join(training_args.output_dir , '''test_predictions.txt''' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer: with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f: token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return results def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' main() if __name__ == "__main__": main()
167
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCAmelCase_ : """simple docstring""" @staticmethod def snake_case ( *lowerCAmelCase , **lowerCAmelCase ): """simple docstring""" pass @is_pipeline_test @require_vision class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @require_torch def snake_case ( self ): """simple docstring""" snake_case = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , ) snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) snake_case = image_classifier(lowerCAmelCase , candidate_labels=['a', 'b', 'c'] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(lowerCAmelCase ) , [ [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}], [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}], ] , ) snake_case = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase ) , [ [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], ] , ) @require_tf def snake_case ( self ): """simple docstring""" snake_case = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' ) snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) snake_case = image_classifier(lowerCAmelCase , candidate_labels=['a', 'b', 'c'] ) self.assertEqual( nested_simplify(lowerCAmelCase ) , [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] , ) snake_case = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase ) , [ [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], [ {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, 
{'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, {'score': 0.3_33, 'label': ANY(lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case ( self ): """simple docstring""" snake_case = pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , ) # This is an image of 2 cats with remotes and no planes snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) snake_case = image_classifier(lowerCAmelCase , candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(lowerCAmelCase ) , [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ] , ) snake_case = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase ) , [ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5 , ) @slow @require_tf def snake_case ( self ): """simple docstring""" snake_case = pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' ) # This is an image of 2 cats with remotes and no planes snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) snake_case = image_classifier(lowerCAmelCase , candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(lowerCAmelCase ) , [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ] , ) snake_case = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase ) , [ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5 , )
705
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed SCREAMING_SNAKE_CASE__ = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Any: """simple docstring""" assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] ) -> int: """simple docstring""" if args.student_type == "roberta": snake_case = False elif args.student_type == "gpt2": snake_case = False def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict ) -> Tuple: """simple docstring""" if args.student_type == "roberta": snake_case = False def lowerCAmelCase__ ( ) -> Optional[int]: """simple docstring""" snake_case = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_UpperCamelCase , required=_UpperCamelCase , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_UpperCamelCase , required=_UpperCamelCase , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_UpperCamelCase , choices=['distilbert', 'roberta', 'gpt2'] , required=_UpperCamelCase , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_UpperCamelCase , type=_UpperCamelCase , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_UpperCamelCase , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_UpperCamelCase , required=_UpperCamelCase , help='The teacher model.' 
) parser.add_argument('--temperature' , default=2.0 , type=_UpperCamelCase , help='Temperature for the softmax temperature.' ) parser.add_argument( '--alpha_ce' , default=0.5 , type=_UpperCamelCase , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_UpperCamelCase , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_UpperCamelCase , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_UpperCamelCase , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_UpperCamelCase , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_UpperCamelCase , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_UpperCamelCase , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_UpperCamelCase , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_UpperCamelCase , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_UpperCamelCase , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_UpperCamelCase , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_UpperCamelCase , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_UpperCamelCase , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_UpperCamelCase , default=5_0 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_UpperCamelCase , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_UpperCamelCase , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5e-4 , type=_UpperCamelCase , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1e-6 , type=_UpperCamelCase , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_UpperCamelCase , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_UpperCamelCase , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_UpperCamelCase , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_UpperCamelCase , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_UpperCamelCase , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_UpperCamelCase , default=5_6 , help='Random seed' ) parser.add_argument('--log_interval' , type=_UpperCamelCase , default=5_0_0 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_UpperCamelCase , default=4_0_0_0 , help='Checkpoint interval.' ) snake_case = parser.parse_args() sanity_checks(_UpperCamelCase ) # ARGS # init_gpu_params(_UpperCamelCase ) set_seed(_UpperCamelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_UpperCamelCase ) , _UpperCamelCase , indent=4 ) git_log(args.dump_path ) snake_case ,snake_case ,snake_case = MODEL_CLASSES[args.student_type] snake_case ,snake_case ,snake_case = MODEL_CLASSES[args.teacher_type] # TOKENIZER # snake_case = teacher_tokenizer_class.from_pretrained(args.teacher_name ) snake_case = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): snake_case = tokenizer.all_special_tokens.index(_UpperCamelCase ) snake_case = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) snake_case = special_tok_ids snake_case = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file , 'rb' ) as fp: snake_case = pickle.load(_UpperCamelCase ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , 'rb' ) as fp: snake_case = pickle.load(_UpperCamelCase ) snake_case = np.maximum(_UpperCamelCase , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): snake_case = 0.0 # do not predict special tokens snake_case = torch.from_numpy(_UpperCamelCase ) else: snake_case = None snake_case = LmSeqsDataset(params=_UpperCamelCase , data=_UpperCamelCase ) logger.info('Data loader created.' ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) snake_case = student_config_class.from_pretrained(args.student_config ) snake_case = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) snake_case = student_model_class.from_pretrained(args.student_pretrained_weights , config=_UpperCamelCase ) else: snake_case = student_model_class(_UpperCamelCase ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info('Student loaded.' 
) # TEACHER # snake_case = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_UpperCamelCase ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_UpperCamelCase , _UpperCamelCase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_UpperCamelCase , _UpperCamelCase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() snake_case = Distiller( params=_UpperCamelCase , dataset=_UpperCamelCase , token_probs=_UpperCamelCase , student=_UpperCamelCase , teacher=_UpperCamelCase ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
104
0
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class lowerCAmelCase__ : def lowercase ( self : Optional[Any] , _lowerCamelCase : int ): raise NotImplementedError() def lowercase ( self : List[str] ): raise NotImplementedError() class lowerCAmelCase__ ( A_ ): def __init__( self : Any , _lowerCamelCase : "AutoTokenizer" , _lowerCamelCase : bool = False , **_lowerCamelCase : Optional[int] ): _snake_case = tokenizer _snake_case = skip_prompt _snake_case = decode_kwargs # variables used in the streaming process _snake_case = [] _snake_case = 0 _snake_case = True def lowercase ( self : int , _lowerCamelCase : str ): if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: _snake_case = value[0] if self.skip_prompt and self.next_tokens_are_prompt: _snake_case = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) _snake_case = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): _snake_case = text[self.print_len :] _snake_case = [] _snake_case = 0 # If the last token is a CJK character, we print the characters. elif len(_lowerCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): _snake_case = text[self.print_len :] self.print_len += len(_lowerCamelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: _snake_case = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(_lowerCamelCase ) self.on_finalized_text(_lowerCamelCase ) def lowercase ( self : List[Any] ): # Flush the cache, if it exists if len(self.token_cache ) > 0: _snake_case = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) _snake_case = text[self.print_len :] _snake_case = [] _snake_case = 0 else: _snake_case = '''''' _snake_case = True self.on_finalized_text(_lowerCamelCase , stream_end=_lowerCamelCase ) def lowercase ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : bool = False ): print(_lowerCamelCase , flush=_lowerCamelCase , end='''''' if not stream_end else None ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : Dict ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4e_00 and cp <= 0x9f_ff) or (cp >= 0x34_00 and cp <= 0x4d_bf) # or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) # or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) # or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) # or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) # or (cp >= 0xf9_00 and cp <= 0xfa_ff) or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) # ): # return True return False class lowerCAmelCase__ ( A_ ): def __init__( self : Union[str, Any] , _lowerCamelCase : "AutoTokenizer" , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[float] = None , **_lowerCamelCase : Optional[Any] ): super().__init__(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) _snake_case = Queue() _snake_case = None _snake_case = timeout def lowercase ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : bool = False ): self.text_queue.put(_lowerCamelCase , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : int ): return self def lowercase ( self : List[str] ): _snake_case = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
224
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { 'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json', } class lowerCAmelCase__ ( A_ ): __a = """mgp-str""" def __init__( self : int , _lowerCamelCase : str=[32, 128] , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Tuple=3 , _lowerCamelCase : Optional[Any]=27 , _lowerCamelCase : str=38 , _lowerCamelCase : int=50257 , _lowerCamelCase : Tuple=30522 , _lowerCamelCase : Any=768 , _lowerCamelCase : Dict=12 , _lowerCamelCase : Union[str, Any]=12 , _lowerCamelCase : Any=4.0 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : int=1e-5 , _lowerCamelCase : Dict=0.0 , _lowerCamelCase : int=0.0 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str=0.0_2 , **_lowerCamelCase : Optional[Any] , ): super().__init__(**_lowerCamelCase ) _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = max_token_length _snake_case = num_character_labels _snake_case = num_bpe_labels _snake_case = num_wordpiece_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = mlp_ratio _snake_case = distilled _snake_case = layer_norm_eps _snake_case = drop_rate _snake_case = qkv_bias _snake_case = attn_drop_rate _snake_case = drop_path_rate _snake_case = output_aa_attentions _snake_case = initializer_range
224
1
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of base by height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)

    return result


if __name__ == "__main__":
    print(f"{solution() = }")
684
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
684
1
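The solution above relies on square-and-multiply modular exponentiation. As a quick sanity check, a standalone restatement of the helper should agree with Python's built-in three-argument pow:

def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)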
"""simple docstring""" import math import random def __A ( a_ :float , a_ :bool = False) -> float: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value)) # Initial Value A = 0.02 def __A ( a_ :int , a_ :int) -> float: __a : List[Any] = float(2 * (random.randint(1 , 1_00)) - 1) for _ in range(a_): # Forward propagation __a : int = sigmoid_function(INITIAL_VALUE * weight) # How much did we miss? __a : int = (expected / 1_00) - layer_a # Error delta __a : Dict = layer_1_error * sigmoid_function(a_ , a_) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_00 if __name__ == "__main__": import doctest doctest.testmod() A = int(input('''Expected value: ''')) A = int(input('''Number of propagations: ''')) print(forward_propagation(expected, number_propagations))
52
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
669
0
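The deriv branch of the sigmoid above expects the activation itself as input, since sigmoid'(x) = s * (1 - s) with s = sigmoid(x). A minimal check of that identity:

import math


def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Feed the activation, not the pre-activation, into the derivative branch.
s = sigmoid_function(0.5)
assert abs(sigmoid_function(s, deriv=True) - s * (1 - s)) < 1e-12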
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def A_ ( _lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Any = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2] _lowerCamelCase : int = True if '''large''' in model_name or '''huge''' in model_name else False _lowerCamelCase : Dict = True if '''large''' in model_name or '''huge''' in model_name else False _lowerCamelCase : List[str] = True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: _lowerCamelCase : int = [3, 3, 3, 3] _lowerCamelCase : str = [5, 5, 5, 5] elif "fl4" in model_name: _lowerCamelCase : List[str] = [4, 4, 4, 4] _lowerCamelCase : Union[str, Any] = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: _lowerCamelCase : Any = [3, 3, 3, 3] if "lrf" in model_name: _lowerCamelCase : Optional[Any] = [3, 3, 3, 3] else: _lowerCamelCase : Any = [2, 2, 2, 2] if "tiny" in model_name: _lowerCamelCase : Dict = 96 elif "small" in model_name: _lowerCamelCase : Any = 96 elif "base" in model_name: _lowerCamelCase : Union[str, Any] = 128 elif "large" in model_name: _lowerCamelCase : List[Any] = 192 elif "xlarge" in model_name: _lowerCamelCase : List[Any] = 256 elif "huge" in model_name: _lowerCamelCase : Optional[Any] = 352 # set label information _lowerCamelCase : Dict = '''huggingface/label-files''' if "large" in model_name or "huge" in model_name: _lowerCamelCase : Optional[Any] = '''imagenet-22k-id2label.json''' else: _lowerCamelCase : Union[str, Any] = '''imagenet-1k-id2label.json''' _lowerCamelCase : Tuple = json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : Dict = {int(a_ ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()} _lowerCamelCase : Any = FocalNetConfig( embed_dim=a_ , depths=a_ , focal_levels=a_ , focal_windows=a_ , use_conv_embed=a_ , idalabel=a_ , labelaid=a_ , use_post_layernorm=a_ , use_layerscale=a_ , ) return config def A_ ( _lowerCAmelCase : List[Any] ): """simple docstring""" if "patch_embed.proj" in name: _lowerCamelCase : Dict = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: _lowerCamelCase : Dict = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: _lowerCamelCase : Any = '''encoder.''' + name if "encoder.layers" in name: _lowerCamelCase : Any = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: _lowerCamelCase : Optional[int] = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: _lowerCamelCase : int = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: _lowerCamelCase : int = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: _lowerCamelCase : Union[str, Any] = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: _lowerCamelCase : Tuple = name.replace("modulation.proj" , 
"modulation.projection_out" ) if name == "norm.weight": _lowerCamelCase : Tuple = '''layernorm.weight''' if name == "norm.bias": _lowerCamelCase : List[Any] = '''layernorm.bias''' if "head" in name: _lowerCamelCase : List[Any] = name.replace("head" , "classifier" ) else: _lowerCamelCase : Tuple = '''focalnet.''' + name return name def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any]=False ): """simple docstring""" _lowerCamelCase : Optional[Any] = { '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on _lowerCamelCase : Optional[int] = model_name_to_url[model_name] print("Checkpoint URL: " , a_ ) _lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(a_ , map_location="cpu" )['''model'''] # rename keys for key in state_dict.copy().keys(): _lowerCamelCase : Union[str, Any] = state_dict.pop(a_ ) _lowerCamelCase : Union[str, Any] = val _lowerCamelCase : Optional[int] = get_focalnet_config(a_ ) _lowerCamelCase : Union[str, Any] = FocalNetForImageClassification(a_ ) model.eval() # load state dict model.load_state_dict(a_ ) # verify conversion _lowerCamelCase : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _lowerCamelCase : Tuple = BitImageProcessor( do_resize=a_ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=a_ , crop_size=224 , do_normalize=a_ , image_mean=a_ , image_std=a_ , ) _lowerCamelCase : Dict = Image.open(requests.get(a_ , stream=a_ ).raw ) _lowerCamelCase : Union[str, Any] = processor(images=a_ , return_tensors="pt" ) _lowerCamelCase : Dict = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _lowerCamelCase : List[str] = image_transforms(a_ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , a_ , atol=1E-4 ) _lowerCamelCase : Optional[Any] = model(**a_ ) _lowerCamelCase : int = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": _lowerCamelCase : Optional[Any] = torch.tensor([0.2_1_6_6, 
-0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": _lowerCamelCase : Dict = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": _lowerCamelCase : Any = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": _lowerCamelCase : List[str] = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": _lowerCamelCase : Any = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": _lowerCamelCase : Dict = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(a_ ) processor.save_pretrained(a_ ) if push_to_hub: print(F'Pushing model and processor of {model_name} to the hub...' ) model.push_to_hub(F'{model_name}' ) processor.push_to_hub(F'{model_name}' ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) UpperCAmelCase_ : List[str] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
712
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : Any = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } UpperCAmelCase_ : List[str] = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128} class UpperCAmelCase__ ( A ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['input_ids', 'attention_mask'] lowerCAmelCase_ = BlenderbotTokenizer def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,): super().__init__( __A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space: _lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) ) _lowerCamelCase : List[Any] = add_prefix_space _lowerCamelCase : Optional[Any] = pre_tok_class(**__A ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : Any = "post_processor" _lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A ) if tokenizer_component_instance: _lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : List[Any] = tuple(state["sep"] ) if "cls" in state: _lowerCamelCase : Optional[int] = tuple(state["cls"] ) _lowerCamelCase : List[str] = False if state.get("add_prefix_space",__A ) != add_prefix_space: _lowerCamelCase : List[str] = add_prefix_space _lowerCamelCase : int = True if state.get("trim_offsets",__A ) != trim_offsets: _lowerCamelCase : List[str] = trim_offsets _lowerCamelCase : Dict = True if changes_to_apply: _lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) ) _lowerCamelCase : Union[str, Any] = component_class(**__A ) setattr(self.backend_tokenizer,__A,__A ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def lowerCamelCase_ ( self : Optional[int] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ): _lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value _lowerCamelCase : List[str] = value def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ): _lowerCamelCase : int = kwargs.get("is_split_into_words",__A ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__A,**__A ) def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ): _lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*__A,**__A ) def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ): _lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A ) return tuple(__A ) def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ): _lowerCamelCase : int = [self.sep_token_id] _lowerCamelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def lowerCamelCase_ ( self : List[str],__A : "Conversation" ): _lowerCamelCase : Dict = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__A ) _lowerCamelCase : List[Any] = " ".join(__A ) _lowerCamelCase : List[str] = self.encode(__A ) if len(__A ) > self.model_max_length: _lowerCamelCase : Tuple = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
11
0
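The conversion script above follows the usual checkpoint-porting pattern: download a reference state dict, rename its keys, load it into a freshly configured model, and verify the logits. A toy sketch of the rename-and-reload step, with hypothetical module names rather than the actual FocalNet keys:

import torch


def rename_key(name: str) -> str:
    # hypothetical renamer in the spirit of the mapping above
    return name.replace("proj", "projection")


old_model = torch.nn.Sequential()
old_model.add_module("proj", torch.nn.Linear(4, 4))

new_state = {rename_key(k): v for k, v in old_model.state_dict().items()}

new_model = torch.nn.Sequential()
new_model.add_module("projection", torch.nn.Linear(4, 4))
new_model.load_state_dict(new_state)  # keys now line up with the new module names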
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
383
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class __a : '''simple docstring''' _SCREAMING_SNAKE_CASE :Optional[Union[str, Path]] = None _SCREAMING_SNAKE_CASE :bool = False _SCREAMING_SNAKE_CASE :bool = False _SCREAMING_SNAKE_CASE :bool = False _SCREAMING_SNAKE_CASE :Optional[Dict] = None _SCREAMING_SNAKE_CASE :Optional[str] = None _SCREAMING_SNAKE_CASE :bool = False _SCREAMING_SNAKE_CASE :bool = False _SCREAMING_SNAKE_CASE :bool = False _SCREAMING_SNAKE_CASE :bool = True _SCREAMING_SNAKE_CASE :Optional[int] = None _SCREAMING_SNAKE_CASE :int = 1 _SCREAMING_SNAKE_CASE :Optional[Union[str, bool]] = None _SCREAMING_SNAKE_CASE :bool = False _SCREAMING_SNAKE_CASE :Optional[Dict] = None _SCREAMING_SNAKE_CASE :Optional[str] = None def _a ( self ) -> "DownloadConfig": """simple docstring""" return self.__class__(**{k: copy.deepcopy(_a ) for k, v in self.__dict__.items()} )
680
0
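The CLI entry point above dispatches on a func attribute that each subcommand parser registers via set_defaults. A self-contained toy version of the same argparse pattern (hypothetical command name and handler):

from argparse import ArgumentParser

parser = ArgumentParser("tool", usage="tool <command> [<args>]")
subparsers = parser.add_subparsers(help="tool command helpers")
env_parser = subparsers.add_parser("env")
env_parser.set_defaults(func=lambda args: print("collecting env info"))

args = parser.parse_args(["env"])
if not hasattr(args, "func"):
    parser.print_help()
else:
    args.func(args)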
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""", # See all SEW models at https://huggingface.co/models?filter=sew } class a ( UpperCAmelCase__ ): UpperCamelCase : int = 'sew' def __init__( self : Any , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Any=768 , lowerCAmelCase : str=12 , lowerCAmelCase : str=12 , lowerCAmelCase : List[Any]=3072 , lowerCAmelCase : int=2 , lowerCAmelCase : str="gelu" , lowerCAmelCase : int=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : str=0.0 , lowerCAmelCase : str=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Optional[Any]=0.0_2 , lowerCAmelCase : Optional[Any]=1E-5 , lowerCAmelCase : Dict="group" , lowerCAmelCase : List[str]="gelu" , lowerCAmelCase : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCAmelCase : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase : List[str]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : int=16 , lowerCAmelCase : List[str]=True , lowerCAmelCase : Optional[Any]=0.0_5 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : str=10 , lowerCAmelCase : int=0 , lowerCAmelCase : Dict="mean" , lowerCAmelCase : Any=False , lowerCAmelCase : Dict=False , lowerCAmelCase : Optional[Any]=256 , lowerCAmelCase : Dict=0 , lowerCAmelCase : Union[str, Any]=1 , lowerCAmelCase : Tuple=2 , **lowerCAmelCase : Dict , ) -> str: '''simple docstring''' super().__init__(**lowerCAmelCase , pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Dict =hidden_size SCREAMING_SNAKE_CASE_: Dict =feat_extract_norm SCREAMING_SNAKE_CASE_: Dict =feat_extract_activation SCREAMING_SNAKE_CASE_: Tuple =list(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: str =list(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: str =list(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] =conv_bias SCREAMING_SNAKE_CASE_: Optional[Any] =num_conv_pos_embeddings SCREAMING_SNAKE_CASE_: Optional[Any] =num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE_: List[Any] =len(self.conv_dim ) SCREAMING_SNAKE_CASE_: Any =num_hidden_layers SCREAMING_SNAKE_CASE_: int =intermediate_size SCREAMING_SNAKE_CASE_: Tuple =squeeze_factor SCREAMING_SNAKE_CASE_: Dict =hidden_act SCREAMING_SNAKE_CASE_: Tuple =num_attention_heads SCREAMING_SNAKE_CASE_: List[Any] =hidden_dropout SCREAMING_SNAKE_CASE_: int =attention_dropout SCREAMING_SNAKE_CASE_: Dict =activation_dropout SCREAMING_SNAKE_CASE_: List[str] =feat_proj_dropout SCREAMING_SNAKE_CASE_: Dict =final_dropout SCREAMING_SNAKE_CASE_: Optional[Any] =layerdrop SCREAMING_SNAKE_CASE_: Any =layer_norm_eps SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range SCREAMING_SNAKE_CASE_: int =vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, 
`len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE_: Dict =apply_spec_augment SCREAMING_SNAKE_CASE_: Optional[int] =mask_time_prob SCREAMING_SNAKE_CASE_: Optional[int] =mask_time_length SCREAMING_SNAKE_CASE_: Dict =mask_time_min_masks SCREAMING_SNAKE_CASE_: Dict =mask_feature_prob SCREAMING_SNAKE_CASE_: Tuple =mask_feature_length SCREAMING_SNAKE_CASE_: Tuple =mask_feature_min_masks # ctc loss SCREAMING_SNAKE_CASE_: Union[str, Any] =ctc_loss_reduction SCREAMING_SNAKE_CASE_: List[Any] =ctc_zero_infinity # sequence classification SCREAMING_SNAKE_CASE_: str =use_weighted_layer_sum SCREAMING_SNAKE_CASE_: List[str] =classifier_proj_size @property def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
700
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class a ( yaml.SafeLoader ): def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value] SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys] SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' ) def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase ) self._check_no_duplicates_on_constructed_node(lowerCAmelCase ) return mapping def __magic_name__ ( lowercase ): SCREAMING_SNAKE_CASE_: Dict =list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: SCREAMING_SNAKE_CASE_: Union[str, Any] =full_content[1:].index("""---""" ) + 1 SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(lowercase ) class a ( UpperCAmelCase__ ): # class attributes UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata @classmethod def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata": '''simple docstring''' with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(lowerCAmelCase ) else: return cls() def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]: '''simple docstring''' if path.exists(): with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file: SCREAMING_SNAKE_CASE_: str =readme_file.read() else: SCREAMING_SNAKE_CASE_: str =None SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase ) with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file: readme_file.write(lowerCAmelCase ) def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str: '''simple docstring''' if readme_content is not None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content else: SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n""" return full_content @classmethod def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata": '''simple docstring''' SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields SCREAMING_SNAKE_CASE_: List[Any] ={ (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**lowerCAmelCase ) def lowerCamelCase__ ( self : Dict ) -> str: '''simple docstring''' return yaml.safe_dump( { (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , 
sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" ) _UpperCAmelCase = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser _UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") _UpperCAmelCase = ap.parse_args() _UpperCAmelCase = Path(args.readme_filepath) _UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
36
0
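The duplicate-key guard in the YAML loader above can be reproduced in a few lines of PyYAML. A sketch assuming scalar (hashable) keys; the original additionally converts list keys to tuples before counting:

from collections import Counter

import yaml


class NoDuplicateSafeLoader(yaml.SafeLoader):
    def construct_mapping(self, node, deep=False):
        # PyYAML silently lets a later duplicate key win; count keys first and refuse instead.
        keys = [self.construct_object(key_node) for key_node, _ in node.value]
        duplicates = [key for key, count in Counter(keys).items() if count > 1]
        if duplicates:
            raise TypeError(f"Got duplicate yaml keys: {duplicates}")
        return super().construct_mapping(node, deep=deep)


print(yaml.load("a: 1\nb: 2\n", Loader=NoDuplicateSafeLoader))  # {'a': 1, 'b': 2}
# yaml.load("a: 1\na: 2\n", Loader=NoDuplicateSafeLoader)       # would raise TypeError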
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
662
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]: """simple docstring""" def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ): A : Optional[int] = round(val / multiple ) * multiple if max_val is not None and x > max_val: A : Optional[Any] = math.floor(val / multiple ) * multiple if x < min_val: A : Any = math.ceil(val / multiple ) * multiple return x A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size A , A : List[Any] = get_image_size(_lowerCAmelCase ) A , A : List[Any] = output_size # determine new height and width A : Optional[int] = output_height / input_height A : Optional[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width A : Any = scale_width else: # fit height A : int = scale_height A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase ) A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase ) return (new_height, new_width) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[int] = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : int = size if size is not None else {"""height""": 384, """width""": 384} A : str = get_size_dict(lowerCamelCase__ ) A : Optional[Any] = do_resize A : Optional[int] = size A : Union[str, Any] = keep_aspect_ratio A : int = ensure_multiple_of A : Dict = resample A : Optional[Any] = do_rescale A : Any = rescale_factor A : str = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Dict = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) A : Optional[Any] = get_resize_output_image_size( lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, ) return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A : str = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__ ) A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of A : Tuple = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : int = rescale_factor if rescale_factor is not None else self.rescale_factor A : int = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : Optional[int] = image_std if image_std is not None else self.image_std A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(lowerCamelCase__ ): A : int = target_sizes.numpy() A : Union[str, Any] = [] for idx in range(len(lowerCamelCase__ ) ): A : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ ) A : Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCamelCase__ ) else: A : List[str] = logits.argmax(dim=1 ) A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
662
1
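The resize logic above snaps each output dimension to a multiple of ensure_multiple_of while honoring optional bounds. A worked check of that helper in isolation:

import math


def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


assert constraint_to_multiple_of(383, 32) == 384               # rounds to the nearest multiple
assert constraint_to_multiple_of(383, 32, max_val=383) == 352  # capped, so it floors instead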
"""simple docstring""" import os def lowercase ( ) -> Any: with open(os.path.dirname(__UpperCamelCase ) + '''/p022_names.txt''' ) as file: __magic_name__ = str(file.readlines()[0] ) __magic_name__ = names.replace('''"''' , '''''' ).split(''',''' ) names.sort() __magic_name__ = 0 __magic_name__ = 0 for i, name in enumerate(__UpperCamelCase ): for letter in name: name_score += ord(__UpperCamelCase ) - 64 total_score += (i + 1) * name_score __magic_name__ = 0 return total_score if __name__ == "__main__": print(solution())
720
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" # See all FNet models at https://huggingface.co/models?filter=fnet } class _lowercase ( __UpperCAmelCase ): _lowerCamelCase = '''fnet''' def __init__( self , UpperCamelCase_=3_2000 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu_new" , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=4 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-1_2 , UpperCamelCase_=False , UpperCamelCase_=512 , UpperCamelCase_=3 , UpperCamelCase_=1 , UpperCamelCase_=2 , **UpperCamelCase_ , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __magic_name__ = vocab_size __magic_name__ = max_position_embeddings __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = initializer_range __magic_name__ = type_vocab_size __magic_name__ = layer_norm_eps __magic_name__ = use_tpu_fourier_optimizations __magic_name__ = tpu_short_seq_length
190
0
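A worked example for the name-scoring logic above: COLIN has an alphabetical value of 3 + 15 + 12 + 9 + 14 = 53, so at 1-indexed position 938 in the sorted list it contributes 938 * 53 = 49714:

name = "COLIN"
name_score = sum(ord(letter) - 64 for letter in name)  # A=1, B=2, ...
assert name_score == 53
assert 938 * name_score == 49714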
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" __A = inspect.getfile(accelerate.test_utils ) __A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] ) __A = ["accelerate", "launch"] __A = Path.home() / ".cache/huggingface/accelerate" __A = "default_config.yaml" __A = config_folder / config_file __A = config_folder / "_default_config.yaml" __A = Path("""tests/test_configs""" ) @classmethod def __lowerCAmelCase ( cls ): """simple docstring""" if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def __lowerCAmelCase ( cls ): """simple docstring""" if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def __lowerCAmelCase ( self ): """simple docstring""" for config in sorted(self.test_config_path.glob('**/*.yaml' ) ): with self.subTest(config_file=_UpperCamelCase ): execute_subprocess_async( self.base_cmd + ['--config_file', str(_UpperCamelCase ), self.test_file_path] , env=os.environ.copy() ) def __lowerCAmelCase ( self ): """simple docstring""" execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() ) class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" __A = "test-tpu" __A = "us-central1-a" __A = "ls" __A = ["accelerate", "tpu-config"] __A = "cd /usr/share" __A = "tests/test_samples/test_command_file.sh" __A = "Running gcloud compute tpus tpu-vm ssh" def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCamelCase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCamelCase , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=_UpperCamelCase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCamelCase , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCamelCase ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCamelCase , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCamelCase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCamelCase , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, 
'--command', 'echo \"Hello World\"', '--debug', ] , return_stdout=_UpperCamelCase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _UpperCamelCase , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCamelCase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCamelCase , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=_UpperCamelCase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCamelCase , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCamelCase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCamelCase , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] , return_stdout=_UpperCamelCase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCamelCase , )
187
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase_ = 16 lowerCamelCase_ = 32 def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase = 16 ): SCREAMING_SNAKE_CASE__ =AutoTokenizer.from_pretrained("""bert-base-cased""" ) SCREAMING_SNAKE_CASE__ =load_dataset("""glue""", """mrpc""" ) def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE__ =tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=__UpperCamelCase, max_length=__UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE__ =datasets.map( __UpperCamelCase, batched=__UpperCamelCase, remove_columns=["""idx""", """sentence1""", """sentence2"""], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE__ =tokenized_datasets.rename_column("""label""", """labels""" ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE__ =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE__ =16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE__ =8 else: SCREAMING_SNAKE_CASE__ =None return tokenizer.pad( __UpperCamelCase, padding="""longest""", max_length=__UpperCamelCase, pad_to_multiple_of=__UpperCamelCase, return_tensors="""pt""", ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE__ =DataLoader( tokenized_datasets["""train"""], shuffle=__UpperCamelCase, collate_fn=__UpperCamelCase, batch_size=__UpperCamelCase ) SCREAMING_SNAKE_CASE__ =DataLoader( tokenized_datasets["""validation"""], shuffle=__UpperCamelCase, collate_fn=__UpperCamelCase, batch_size=__UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCamelCase_ = mocked_dataloaders # noqa: F811 def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", __UpperCamelCase ) == "1": SCREAMING_SNAKE_CASE__ =2 # New Code # SCREAMING_SNAKE_CASE__ =int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE__ =Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=__UpperCamelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE__ =config["""lr"""] SCREAMING_SNAKE_CASE__ =int(config["""num_epochs"""] ) SCREAMING_SNAKE_CASE__ =int(config["""seed"""] ) SCREAMING_SNAKE_CASE__ =int(config["""batch_size"""] ) SCREAMING_SNAKE_CASE__ =evaluate.load("""glue""", """mrpc""" ) set_seed(__UpperCamelCase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =get_dataloaders(__UpperCamelCase, __UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE__ =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=__UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE__ =model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE__ =AdamW(params=model.parameters(), lr=__UpperCamelCase ) # Instantiate scheduler SCREAMING_SNAKE_CASE__ =get_linear_schedule_with_warmup( optimizer=__UpperCamelCase, num_warmup_steps=100, num_training_steps=(len(__UpperCamelCase ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =accelerator.prepare( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase ) # Now we train the model for epoch in range(__UpperCamelCase ): model.train() for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(__UpperCamelCase ): SCREAMING_SNAKE_CASE__ =model(**__UpperCamelCase ) SCREAMING_SNAKE_CASE__ =output.loss accelerator.backward(__UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ =model(**__UpperCamelCase ) SCREAMING_SNAKE_CASE__ =outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__UpperCamelCase, references=__UpperCamelCase, ) SCREAMING_SNAKE_CASE__ =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""", __UpperCamelCase ) def UpperCAmelCase_ ( ): SCREAMING_SNAKE_CASE__ =argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""", type=__UpperCamelCase, default=__UpperCamelCase, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""", ) # New Code # parser.add_argument( """--gradient_accumulation_steps""", type=__UpperCamelCase, default=1, help="""The number of minibatches to be ran before gradients are accumulated.""", ) parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""" ) SCREAMING_SNAKE_CASE__ =parser.parse_args() SCREAMING_SNAKE_CASE__ ={"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__UpperCamelCase, __UpperCamelCase ) if __name__ == "__main__": main()
151
0
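The accumulate context manager above amortizes one optimizer step over several minibatches. The same effect in plain PyTorch, as a minimal sketch without accelerate:

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accumulation_steps = 4

for step in range(8):
    x, y = torch.randn(2, 4), torch.randn(2, 1)
    # Scale the loss so the summed gradients match one big-batch step.
    loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps
    loss.backward()  # gradients accumulate in .grad across iterations
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()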
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: __lowerCamelCase = None __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __lowerCamelCase = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', }, 'tokenizer_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json', }, } __lowerCamelCase = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } __lowerCamelCase = '▁' # Segments (not really needed) __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = 3 __lowerCamelCase = 4 class _lowercase ( __lowercase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = '''left''' _lowerCamelCase = XLNetTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<sep>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<cls>" , UpperCamelCase_="<mask>" , UpperCamelCase_=["<eop>", "<eod>"] , **UpperCamelCase_ , ): # Mask token behave like a normal word, i.e. 
include the space before it __magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( vocab_file=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __magic_name__ = 3 __magic_name__ = do_lower_case __magic_name__ = remove_space __magic_name__ = keep_accents __magic_name__ = vocab_file __magic_name__ = False if not self.vocab_file else True def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __magic_name__ = [self.sep_token_id] __magic_name__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __magic_name__ = [self.sep_token_id] __magic_name__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
706
"""simple docstring""" from __future__ import annotations from random import choice def lowercase ( __UpperCamelCase ) -> Any: return choice(__UpperCamelCase ) def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> int: __magic_name__ = random_pivot(__UpperCamelCase ) # partition based on pivot # linear time __magic_name__ = [e for e in lst if e < pivot] __magic_name__ = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(__UpperCamelCase ) == k - 1: return pivot # pivot is in elements bigger than k elif len(__UpperCamelCase ) < k - 1: return kth_number(__UpperCamelCase , k - len(__UpperCamelCase ) - 1 ) # pivot is in elements smaller than k else: return kth_number(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
190
0
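A quick sanity check of the selection routine above, restated compactly. Distinct elements are assumed, since the strict < and > partitions silently drop duplicates of the pivot:

from random import choice


def kth_number(lst, k):
    # compact restatement of the quickselect above
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    if len(small) == k - 1:
        return pivot
    if len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    return kth_number(small, k)


data = [7, 10, 4, 3, 20, 15]
assert kth_number(data, 3) == sorted(data)[2] == 7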
"""simple docstring""" import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class _UpperCamelCase ( unittest.TestCase): def A (self ): """simple docstring""" A__ = logging.get_logger() # the current default level is logging.WARNING A__ = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCamelCase__ ) def A (self ): """simple docstring""" A__ = logging.get_verbosity() A__ = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) A__ = """Testing 1, 2, 3""" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCamelCase__ ) as cl: logger.warning(lowerCamelCase__ ) self.assertEqual(cl.out , msg + """\n""" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCamelCase__ ) as cl: logger.warning(lowerCamelCase__ ) self.assertEqual(cl.out , """""" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCamelCase__ ) as cl: logger.warning(lowerCamelCase__ ) self.assertEqual(cl.out , msg + """\n""" ) # restore to the original level logging.set_verbosity(lowerCamelCase__ ) @mockenv(TRANSFORMERS_VERBOSITY="""error""" ) def A (self ): """simple docstring""" # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var A__ = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) A__ = os.getenv("""TRANSFORMERS_VERBOSITY""" , lowerCamelCase__ ) A__ = logging.log_levels[env_level_str] A__ = logging.get_verbosity() self.assertEqual( lowerCamelCase__ , lowerCamelCase__ , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level A__ = """""" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="""super-error""" ) def A (self ): """simple docstring""" # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() A__ = logging.logging.getLogger() with CaptureLogger(lowerCamelCase__ ) as cl: # this action activates the env var logging.get_logger("""transformers.models.bart.tokenization_bart""" ) self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out ) # no need to restore as nothing was changed def A (self ): """simple docstring""" # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() A__ = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) A__ = """Testing 1, 2, 3""" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ): # nothing should be logged as env 
var disables this method with CaptureLogger(lowerCamelCase__ ) as cl: logger.warning_advice(lowerCamelCase__ ) self.assertEqual(cl.out , """""" ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCamelCase__ ) as cl: logger.warning_advice(lowerCamelCase__ ) self.assertEqual(cl.out , msg + """\n""" ) def _SCREAMING_SNAKE_CASE ( ): disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
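# Added illustration (not part of the original test file): a minimal sketch of the
# verbosity API the tests above exercise, using only documented `transformers` calls.
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_error()  # silence WARNING-level output on every transformers.* logger
example_logger = hf_logging.get_logger("transformers.models.bart.tokenization_bart")
example_logger.warning("suppressed at ERROR verbosity")  # not emitted
hf_logging.set_verbosity_warning()  # restore the default WARNING level
example_logger.warning("emitted again")  # printed to stderr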
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCamelCase ( __snake_case , unittest.TestCase): __lowerCamelCase = LongformerTokenizer __lowerCamelCase = True __lowerCamelCase = LongformerTokenizerFast __lowerCamelCase = True def A (self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] A__ = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) A__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] A__ = {"""unk_token""": """<unk>"""} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCamelCase__ ) ) def A (self , **lowerCamelCase__ ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def A (self , **lowerCamelCase__ ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def A (self , lowerCamelCase__ ): """simple docstring""" A__ = """lower newer""" A__ = """lower newer""" return input_text, output_text def A (self ): """simple docstring""" A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A__ = """lower newer""" A__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] A__ = tokenizer.tokenize(lowerCamelCase__ ) # , add_prefix_space=True) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) A__ = tokens + [tokenizer.unk_token] A__ = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) def A (self ): """simple docstring""" A__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowerCamelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=lowerCamelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , ) @slow def A (self ): """simple docstring""" A__ = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) A__ = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCamelCase__ ) A__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCamelCase__ ) A__ = tokenizer.encode( """sequence builders""" , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ ) A__ = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ ) A__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ) A__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def A (self ): """simple docstring""" A__ = self.get_tokenizer() A__ = """Encode this sequence.""" A__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ ) A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ ) # Testing spaces after special tokens A__ = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )} ) # mask token has a left space A__ = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) A__ = """Encode <mask> sequence""" A__ = """Encode <mask>sequence""" A__ = tokenizer.encode(lowerCamelCase__ ) A__ = encoded.index(lowerCamelCase__ ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) A__ = tokenizer.encode(lowerCamelCase__ ) A__ = encoded.index(lowerCamelCase__ ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ ) def A (self ): """simple docstring""" pass def A (self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): A__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) A__ = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) A__ = """A, <mask> AllenNLP sentence.""" A__ = tokenizer_r.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ ) A__ = tokenizer_p.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) A__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) A__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( lowerCamelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( lowerCamelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def A (self ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ ) A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , lowerCamelCase__ ) self.assertEqual(post_processor_state["""add_prefix_space"""] , lowerCamelCase__ ) self.assertEqual(post_processor_state["""trim_offsets"""] , lowerCamelCase__ ) def A (self ): """simple docstring""" # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): A__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` A__ = F"""{text_of_1_token} {text_of_1_token}""" A__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ ) A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) A__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ ) A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) A__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ ) A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) A__ = 
self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ ) A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) A__ = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ ) A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) A__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ ) A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) A__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ ) A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
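# Added illustration (not part of the original test file): what the offset checks above
# look like outside the test harness, assuming the public Longformer checkpoint and the
# fast tokenizer's defaults (add_prefix_space=False, trim_offsets=True).
from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# trim_offsets=True excludes the leading space from the second token's span:
print(enc.offset_mapping)  # [(0, 5), (6, 11)]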
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
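# Added usage sketch (not part of the original file): calling the tool end to end.
# "photo.jpg" is a placeholder path; the first call downloads the default checkpoint.
from PIL import Image

tool = ImageCaptioningTool()
caption = tool(Image.open("photo.jpg"))  # runs encode -> forward -> decode under the hood
print(caption)  # e.g. "a dog lying on a couch"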
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place with the bidirectional bubble ("cocktail shaker") sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # pass from right to left, sinking the smallest remaining element
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True

        # pass from left to right, floating the largest remaining element
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
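# Added quick checks for the function above (not part of the original file).
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 5, 0, 1, 2]) == [-4, 0, 1, 2, 5]
assert cocktail_shaker_sort([]) == []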
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the n-digit positive integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
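# Added worked instance (not part of the original file): 9**21 has exactly 21 digits,
# so it is counted; 10**n always has n + 1 digits, which is why only bases below 10 qualify.
assert len(str(9**21)) == 21
assert len(str(10**3)) == 4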
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    sd = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(sd)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
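# Added shape check (not part of the original script): the fused (3*d, d) qkv_proj
# weight splits along dim 0 into three equal (d, d) blocks, exactly as load_checkpoint()
# does above.
import torch

d = 8
qkv_weight = torch.randn(3 * d, d)
k, v, q = torch.split(qkv_weight, qkv_weight.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (d, d)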
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __magic_name__ ( __UpperCAmelCase ): __A : Dict = ["image_processor", "tokenizer"] __A : List[str] = "ViltImageProcessor" __A : str = ("BertTokenizer", "BertTokenizerFast") def __init__( self : List[str] , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None , **snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase :Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , snake_case__ , ) lowercase :Dict = kwargs.pop('''feature_extractor''' ) lowercase :Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(snake_case__ , snake_case__ ) lowercase :Tuple = self.image_processor def __call__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : Dict , ): '''simple docstring''' lowercase :Any = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) # add pixel_values + pixel_mask lowercase :str = self.image_processor(snake_case__ , return_tensors=snake_case__ ) encoding.update(snake_case__ ) return encoding def __snake_case ( self : Tuple , *snake_case__ : List[str] , **snake_case__ : Tuple ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def __snake_case ( self : str , *snake_case__ : Any , **snake_case__ : Dict ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def __snake_case ( self : Any ): '''simple docstring''' lowercase :Dict = self.tokenizer.model_input_names lowercase :Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __snake_case ( self : Dict ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. 
Use `image_processor_class` instead.''' , snake_case__ , ) return self.image_processor_class @property def __snake_case ( self : Optional[Any] ): '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , snake_case__ , ) return self.image_processor
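# Added usage sketch (not part of the original file), assuming the public VQA checkpoint;
# "scene.jpg" is a placeholder path.
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
encoding = processor(Image.open("scene.jpg"), "How many cats are there?", return_tensors="pt")
print(sorted(encoding.keys()))  # attention_mask, input_ids, pixel_mask, pixel_values, token_type_ids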
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class __magic_name__ ( unittest.TestCase ): __A : int = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __A : List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __A : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __A : Optional[int] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def __snake_case ( self : Tuple , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int ): '''simple docstring''' lowercase :Optional[int] = ZeroShotClassificationPipeline( model=snake_case__ , tokenizer=snake_case__ , candidate_labels=['''polics''', '''health'''] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def __snake_case ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : Tuple ): '''simple docstring''' lowercase :int = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' ) self.assertEqual(snake_case__ , {'''sequence''': ANY(snake_case__ ), '''labels''': [ANY(snake_case__ )], '''scores''': [ANY(snake_case__ )]} ) # No kwarg lowercase :Tuple = classifier('''Who are you voting for in 2020?''' , ['''politics'''] ) self.assertEqual(snake_case__ , {'''sequence''': ANY(snake_case__ ), '''labels''': [ANY(snake_case__ )], '''scores''': [ANY(snake_case__ )]} ) lowercase :Tuple = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] ) self.assertEqual(snake_case__ , {'''sequence''': ANY(snake_case__ ), '''labels''': [ANY(snake_case__ )], '''scores''': [ANY(snake_case__ )]} ) lowercase :Union[str, Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' ) self.assertEqual( snake_case__ , {'''sequence''': ANY(snake_case__ ), '''labels''': [ANY(snake_case__ ), ANY(snake_case__ )], '''scores''': [ANY(snake_case__ ), ANY(snake_case__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) lowercase :Optional[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] ) self.assertEqual( snake_case__ , {'''sequence''': ANY(snake_case__ ), '''labels''': [ANY(snake_case__ ), ANY(snake_case__ )], '''scores''': [ANY(snake_case__ ), ANY(snake_case__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) lowercase :Optional[Any] = classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' ) self.assertEqual(snake_case__ , {'''sequence''': ANY(snake_case__ ), '''labels''': [ANY(snake_case__ )], '''scores''': [ANY(snake_case__ )]} ) # https://github.com/huggingface/transformers/issues/13846 lowercase :Optional[Any] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] ) self.assertEqual( snake_case__ , [ {'''sequence''': ANY(snake_case__ ), '''labels''': [ANY(snake_case__ ), ANY(snake_case__ 
)], '''scores''': [ANY(snake_case__ ), ANY(snake_case__ )]} for i in range(1 ) ] , ) lowercase :Tuple = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] ) self.assertEqual( snake_case__ , [ {'''sequence''': ANY(snake_case__ ), '''labels''': [ANY(snake_case__ ), ANY(snake_case__ )], '''scores''': [ANY(snake_case__ ), ANY(snake_case__ )]} for i in range(2 ) ] , ) with self.assertRaises(snake_case__ ): classifier('''''' , candidate_labels='''politics''' ) with self.assertRaises(snake_case__ ): classifier(snake_case__ , candidate_labels='''politics''' ) with self.assertRaises(snake_case__ ): classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' ) with self.assertRaises(snake_case__ ): classifier('''Who are you voting for in 2020?''' , candidate_labels=snake_case__ ) with self.assertRaises(snake_case__ ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , ) with self.assertRaises(snake_case__ ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=snake_case__ , ) self.run_entailment_id(snake_case__ ) def __snake_case ( self : Any , snake_case__ : Pipeline ): '''simple docstring''' lowercase :List[Any] = zero_shot_classifier.model.config lowercase :int = config.labelaid lowercase :str = zero_shot_classifier.entailment_id lowercase :Dict = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) lowercase :Optional[Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) lowercase :Tuple = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) lowercase :str = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) lowercase :Optional[Any] = original_labelaid self.assertEqual(snake_case__ , zero_shot_classifier.entailment_id ) @require_torch def __snake_case ( self : List[str] ): '''simple docstring''' lowercase :List[Any] = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. 
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( '''Who are you voting for in 2020?''' * 1_0_0 , candidate_labels=['''politics''', '''public health''', '''science'''] ) @require_torch def __snake_case ( self : Union[str, Any] ): '''simple docstring''' lowercase :str = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) lowercase :Any = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(snake_case__ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.3_33, 0.3_33, 0.3_33], } , ) @require_tf def __snake_case ( self : List[str] ): '''simple docstring''' lowercase :Tuple = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , ) lowercase :List[Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(snake_case__ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.3_33, 0.3_33, 0.3_33], } , ) @slow @require_torch def __snake_case ( self : Optional[int] ): '''simple docstring''' lowercase :Dict = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' ) lowercase :Union[str, Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(snake_case__ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.9_76, 0.0_15, 0.0_09], } , ) lowercase :Optional[Any] = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. 
We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=snake_case__ , ) self.assertEqual( nested_simplify(snake_case__ ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , ) @slow @require_tf def __snake_case ( self : Any ): '''simple docstring''' lowercase :str = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' ) lowercase :Optional[int] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(snake_case__ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.9_76, 0.0_15, 0.0_09], } , ) lowercase :str = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. 
We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=snake_case__ , ) self.assertEqual( nested_simplify(snake_case__ ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , )
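# Added usage sketch (not part of the original test file): the pipeline under test,
# driven the way the slow tests above do, with the public roberta-large-mnli NLI model
# (downloads the checkpoint on first use).
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"][0], round(result["scores"][0], 3))  # "politics" scores highest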
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder __lowerCamelCase : str = "__DUMMY_TRANSFORMERS_USER__" __lowerCamelCase : Optional[Any] = "Dummy User" __lowerCamelCase : str = "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" __lowerCamelCase : List[Any] = "https://hub-ci.huggingface.co" __lowerCamelCase : List[str] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" __lowerCamelCase : Any = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}" __lowerCamelCase : str = Path("~/.huggingface/hub_ci_token").expanduser() @pytest.fixture def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , lowerCAmelCase_ ) @pytest.fixture def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" monkeypatch.setattr("datasets.config.HF_ENDPOINT" , lowerCAmelCase_ ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , lowerCAmelCase_ ) @pytest.fixture def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , lowerCAmelCase_ ) @pytest.fixture def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" HfFolder.save_token(lowerCAmelCase_ ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def UpperCAmelCase_ ( ): """simple docstring""" return HfApi(endpoint=lowerCAmelCase_ ) @pytest.fixture(scope="session" ) def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" lowercase = HfFolder.get_token() HfFolder.save_token(lowerCAmelCase_ ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(lowerCAmelCase_ ) @pytest.fixture def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" def _cleanup_repo(lowerCAmelCase_ ): hf_api.delete_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" @contextmanager def _temporary_repo(lowerCAmelCase_ ): try: yield repo_id finally: cleanup_repo(lowerCAmelCase_ ) return _temporary_repo @pytest.fixture(scope="session" ) def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" lowercase = f'repo_txt_data-{int(time.time() * 1_0E3 )}' lowercase = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" , private=lowerCAmelCase_ ) hf_api.upload_file( token=lowerCAmelCase_ , path_or_fileobj=str(lowerCAmelCase_ ) , path_in_repo="data/text_data.txt" , repo_id=lowerCAmelCase_ , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" lowercase = f'repo_zipped_txt_data-{int(time.time() * 1_0E3 )}' lowercase = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" , private=lowerCAmelCase_ ) hf_api.upload_file( token=lowerCAmelCase_ , path_or_fileobj=str(lowerCAmelCase_ ) , path_in_repo="data.zip" 
, repo_id=lowerCAmelCase_ , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" lowercase = f'repo_zipped_img_data-{int(time.time() * 1_0E3 )}' lowercase = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" , private=lowerCAmelCase_ ) hf_api.upload_file( token=lowerCAmelCase_ , path_or_fileobj=str(lowerCAmelCase_ ) , path_in_repo="data.zip" , repo_id=lowerCAmelCase_ , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return hf_private_dataset_repo_zipped_img_data_
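# Added standalone sketch (not part of the original conftest): the huggingface_hub calls
# the fixtures above wrap. The endpoint, repo id, and token here are placeholders.
from huggingface_hub import HfApi

api = HfApi(endpoint="https://hub-ci.huggingface.co")
api.create_repo("user/demo-data", token="hf_xxx", repo_type="dataset", private=True)
api.upload_file(
    token="hf_xxx",
    path_or_fileobj=b"some text data",
    path_in_repo="data/text_data.txt",
    repo_id="user/demo-data",
    repo_type="dataset",
)
api.delete_repo("user/demo-data", token="hf_xxx", repo_type="dataset")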
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a__ ( A__ , unittest.TestCase ): A = UnCLIPImageVariationPipeline A = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'} A = IMAGE_VARIATION_BATCH_PARAMS A = [ 'generator', 'return_dict', 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] A = False @property def __UpperCamelCase ( self : List[str] ): """simple docstring""" return 32 @property def __UpperCamelCase ( self : str ): """simple docstring""" return 32 @property def __UpperCamelCase ( self : str ): """simple docstring""" return self.time_input_dim @property def __UpperCamelCase ( self : Dict ): """simple docstring""" return self.time_input_dim * 4 @property def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" return 100 @property def __UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def __UpperCamelCase ( self : Any ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,) return CLIPTextModelWithProjection(_A ) @property def __UpperCamelCase ( self : Any ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : int = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,num_hidden_layers=5,num_attention_heads=4,image_size=32,intermediate_size=37,patch_size=1,) return CLIPVisionModelWithProjection(_A ) @property def __UpperCamelCase ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : List[str] = { "clip_embeddings_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "cross_attention_dim": self.cross_attention_dim, } SCREAMING_SNAKE_CASE_ : str = UnCLIPTextProjModel(**_A ) return model @property def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = { "sample_size": 32, # RGB in channels "in_channels": 3, # Out channels is double in channels because predicts mean and variance "out_channels": 6, "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, 
"resnet_time_scale_shift": "scale_shift", "class_embed_type": "identity", } SCREAMING_SNAKE_CASE_ : int = UNetaDConditionModel(**_A ) return model @property def __UpperCamelCase ( self : int ): """simple docstring""" return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def __UpperCamelCase ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def __UpperCamelCase ( self : Tuple ): """simple docstring""" torch.manual_seed(1 ) SCREAMING_SNAKE_CASE_ : Optional[int] = UNetaDModel(**self.dummy_super_res_kwargs ) return model def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_decoder SCREAMING_SNAKE_CASE_ : str = self.dummy_text_proj SCREAMING_SNAKE_CASE_ : str = self.dummy_text_encoder SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_tokenizer SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_super_res_first SCREAMING_SNAKE_CASE_ : Dict = self.dummy_super_res_last SCREAMING_SNAKE_CASE_ : Dict = UnCLIPScheduler( variance_type="learned_range",prediction_type="epsilon",num_train_timesteps=1000,) SCREAMING_SNAKE_CASE_ : Union[str, Any] = UnCLIPScheduler( variance_type="fixed_small_log",prediction_type="epsilon",num_train_timesteps=1000,) SCREAMING_SNAKE_CASE_ : List[Any] = CLIPImageProcessor(crop_size=32,size=32 ) SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def __UpperCamelCase ( self : Any,_A : Dict,_A : Any=0,_A : str=True ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = floats_tensor((1, 3, 32, 32),rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith("mps" ): SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A ) else: SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Generator(device=_A ).manual_seed(_A ) if pil_image: SCREAMING_SNAKE_CASE_ : Optional[Any] = input_image * 0.5 + 0.5 SCREAMING_SNAKE_CASE_ : List[Any] = input_image.clamp(0,1 ) SCREAMING_SNAKE_CASE_ : Optional[int] = input_image.cpu().permute(0,2,3,1 ).float().numpy() SCREAMING_SNAKE_CASE_ : List[str] = DiffusionPipeline.numpy_to_pil(_A )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def __UpperCamelCase ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = "cpu" SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components() SCREAMING_SNAKE_CASE_ : Optional[int] = self.pipeline_class(**_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A,pil_image=_A ) SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A,pil_image=_A ) SCREAMING_SNAKE_CASE_ : Any = pipe( **_A,return_dict=_A,)[0] 
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE_ : Tuple = np.array( [ 0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = "cpu" SCREAMING_SNAKE_CASE_ : str = self.get_dummy_components() SCREAMING_SNAKE_CASE_ : List[Any] = self.pipeline_class(**_A ) SCREAMING_SNAKE_CASE_ : Tuple = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A,pil_image=_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A ) SCREAMING_SNAKE_CASE_ : int = output.images SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_inputs(_A,pil_image=_A ) SCREAMING_SNAKE_CASE_ : List[str] = pipe( **_A,return_dict=_A,)[0] SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE_ : str = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "cpu" SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_components() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.pipeline_class(**_A ) SCREAMING_SNAKE_CASE_ : Tuple = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dummy_inputs(_A,pil_image=_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = [ pipeline_inputs["image"], pipeline_inputs["image"], ] SCREAMING_SNAKE_CASE_ : List[str] = pipe(**_A ) SCREAMING_SNAKE_CASE_ : Any = output.images SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_inputs(_A,pil_image=_A ) SCREAMING_SNAKE_CASE_ : List[str] = [ tuple_pipeline_inputs["image"], tuple_pipeline_inputs["image"], ] SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe( **_A,return_dict=_A,)[0] SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) SCREAMING_SNAKE_CASE_ : Dict = np.array( [ 0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = torch.device("cpu" ) class a__ : A = 1 SCREAMING_SNAKE_CASE_ : str = self.get_dummy_components() SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A ) SCREAMING_SNAKE_CASE_ : Tuple = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) SCREAMING_SNAKE_CASE_ : List[Any] = torch.Generator(device=_A ).manual_seed(0 ) SCREAMING_SNAKE_CASE_ : Dict = pipe.decoder.dtype SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 SCREAMING_SNAKE_CASE_ : List[Any] = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) SCREAMING_SNAKE_CASE_ : Any = pipe.prepare_latents( _A,dtype=_A,device=_A,generator=_A,latents=_A,scheduler=DummyScheduler() ) SCREAMING_SNAKE_CASE_ : 
List[str] = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) SCREAMING_SNAKE_CASE_ : Any = pipe.prepare_latents( _A,dtype=_A,device=_A,generator=_A,latents=_A,scheduler=DummyScheduler() ) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs(_A,pil_image=_A ) SCREAMING_SNAKE_CASE_ : int = pipe( **_A,decoder_latents=_A,super_res_latents=_A ).images SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_inputs(_A,pil_image=_A ) # Don't pass image, instead pass embedding SCREAMING_SNAKE_CASE_ : str = pipeline_inputs.pop("image" ) SCREAMING_SNAKE_CASE_ : List[str] = pipe.image_encoder(_A ).image_embeds SCREAMING_SNAKE_CASE_ : Any = pipe( **_A,decoder_latents=_A,super_res_latents=_A,image_embeddings=_A,).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1E-4 @skip_mps def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = torch_device == "cpu" # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor SCREAMING_SNAKE_CASE_ : int = 1E-2 self._test_attention_slicing_forward_pass( test_max_difference=_A,expected_max_diff=_A ) @skip_mps def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = torch_device == "cpu" SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : Dict = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] self._test_inference_batch_single_identical( test_max_difference=_A,relax_max_difference=_A,additional_params_copy_to_batched_inputs=_A,) def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes SCREAMING_SNAKE_CASE_ : List[Any] = [2, 3] self._test_inference_batch_consistent( batch_sizes=_A,additional_params_copy_to_batched_inputs=_A,) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=_A ) @skip_mps def __UpperCamelCase ( self : Any ): """simple docstring""" return super().test_dict_tuple_outputs_equivalent() @skip_mps def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" return super().test_save_load_local() @skip_mps def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" return super().test_save_load_optional_components() @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" ) SCREAMING_SNAKE_CASE_ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" ) SCREAMING_SNAKE_CASE_ : Tuple = UnCLIPImageVariationPipeline.from_pretrained( "kakaobrain/karlo-v1-alpha-image-variations",torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE_ : Any = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) SCREAMING_SNAKE_CASE_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) SCREAMING_SNAKE_CASE_ : List[Any] = pipeline( _A,generator=_A,output_type="np",) SCREAMING_SNAKE_CASE_ : List[str] 
= output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(_A,_A,15 )
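# `assert_mean_pixel_difference` above comes from diffusers' test utilities; a
# minimal sketch of the check it performs (a hypothetical stand-in, not the
# library implementation) looks like this:
import numpy as np


def mean_pixel_difference_ok(image, expected_image, max_diff=15):
    # Both inputs are uint8-range arrays; pass when the mean absolute
    # per-pixel difference stays under the tolerance.
    image = np.asarray(image, dtype=np.float64)
    expected_image = np.asarray(expected_image, dtype=np.float64)
    return np.abs(image - expected_image).mean() < max_diff


# usage (sketch): mean_pixel_difference_ok(output.images[0] * 255, expected_image, 15)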

class Graph:
    def __init__(self):
        # adjacency list: vertex -> list of adjacent vertices
        # (vertices are assumed to be integers 0..n-1)
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        # append the edge if the source vertex is known, else create it
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # mark all vertices as not visited, then start a traversal from every
        # unvisited vertex so disconnected components are covered too
        visited = [False] * len(self.vertex)
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur only over the vertices adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
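# A minimal iterative variant of the depth-first traversal above, assuming the
# same adjacency-dict representation; an explicit stack replaces recursion and
# sidesteps Python's recursion limit on deep graphs.
def dfs_iterative(graph, start):
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbors in reverse so they are visited in insertion order
        for neighbor in reversed(graph.vertex.get(vertex, [])):
            if neighbor not in visited:
                stack.append(neighbor)
    return order


# e.g. dfs_iterative(g, 0) returns [0, 1, 2, 3] for the example graph above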
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    # convert to km/h first, then from km/h to the target unit
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
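# A quick worked example of the table math in convert_speed above: 100 km/h
# multiplied by speed_chart["km/h"] (1.0) and then by
# speed_chart_inverse["mph"] (0.621371192) gives 62.137 mph after rounding.
assert convert_speed(100, "km/h", "mph") == 62.137
assert convert_speed(62.137, "mph", "km/h") == 100.0  # round-trip closes within rounding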
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
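# A small usage sketch for the config above: `inputs_to_logits_ratio` is the
# product of the conv strides, i.e. how many raw-waveform samples collapse
# into one encoder frame. With the default strides that is 5 * 2**6 = 320.
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, conv_stride, 1) == 320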
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() _lowercase: Optional[int] = logging.get_logger(__name__) def _lowerCamelCase ( snake_case , snake_case , snake_case ): _lowerCAmelCase = os.path.abspath(snake_case ) logger.info(F'Converting TensorFlow checkpoint from {tf_path}' ) # Load weights from TF model _lowerCAmelCase = tf.train.list_variables(snake_case ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") _lowerCAmelCase = full_name.split('/' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F'Skipping non-model layer {full_name}' ) continue if "optimizer" in full_name: logger.info(F'Skipping optimization layer {full_name}' ) continue if name[0] == "model": # ignore initial 'model' _lowerCAmelCase = name[1:] # figure out how many levels deep the name is _lowerCAmelCase = 0 for _name in name: if _name.startswith('layer_with_weights' ): depth += 1 else: break layer_depth.append(snake_case ) # read data _lowerCAmelCase = tf.train.load_variable(snake_case , snake_case ) names.append('/'.join(snake_case ) ) arrays.append(snake_case ) logger.info(F'Read a total of {len(snake_case ):,} layers' ) # Sanity check if len(set(snake_case ) ) != 1: raise ValueError(F'Found layer names with different depths (layer depth {list(set(snake_case ) )})' ) _lowerCAmelCase = list(set(snake_case ) )[0] if layer_depth != 1: raise ValueError( 'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP' ' heads.' ) # convert layers logger.info('Converting weights...' 
) for full_name, array in zip(snake_case , snake_case ): _lowerCAmelCase = full_name.split('/' ) _lowerCAmelCase = model _lowerCAmelCase = [] for i, m_name in enumerate(snake_case ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('layer_with_weights' ): _lowerCAmelCase = int(m_name.split('-' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['embeddings', 'LayerNorm'] ) _lowerCAmelCase = getattr(snake_case , 'embeddings' ) _lowerCAmelCase = getattr(snake_case , 'LayerNorm' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['encoder', 'layer', str(layer_num - 4 )] ) _lowerCAmelCase = getattr(snake_case , 'encoder' ) _lowerCAmelCase = getattr(snake_case , 'layer' ) _lowerCAmelCase = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['pooler', 'dense'] ) _lowerCAmelCase = getattr(snake_case , 'pooler' ) _lowerCAmelCase = getattr(snake_case , 'dense' ) elif m_name == "embeddings": trace.append('embeddings' ) _lowerCAmelCase = getattr(snake_case , 'embeddings' ) if layer_num == 0: trace.append('word_embeddings' ) _lowerCAmelCase = getattr(snake_case , 'word_embeddings' ) elif layer_num == 1: trace.append('position_embeddings' ) _lowerCAmelCase = getattr(snake_case , 'position_embeddings' ) elif layer_num == 2: trace.append('token_type_embeddings' ) _lowerCAmelCase = getattr(snake_case , 'token_type_embeddings' ) else: raise ValueError(F'Unknown embedding layer with name {full_name}' ) trace.append('weight' ) _lowerCAmelCase = getattr(snake_case , 'weight' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['attention', 'self'] ) _lowerCAmelCase = getattr(snake_case , 'attention' ) _lowerCAmelCase = getattr(snake_case , 'self' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['attention', 'output', 'LayerNorm'] ) _lowerCAmelCase = getattr(snake_case , 'attention' ) _lowerCAmelCase = getattr(snake_case , 'output' ) _lowerCAmelCase = getattr(snake_case , 'LayerNorm' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['attention', 'output', 'dense'] ) _lowerCAmelCase = getattr(snake_case , 'attention' ) _lowerCAmelCase = getattr(snake_case , 'output' ) _lowerCAmelCase = getattr(snake_case , 'dense' ) elif m_name == "_output_dense": # output dense trace.extend(['output', 'dense'] ) _lowerCAmelCase = getattr(snake_case , 'output' ) _lowerCAmelCase = getattr(snake_case , 'dense' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['output', 'LayerNorm'] ) _lowerCAmelCase = getattr(snake_case , 'output' ) _lowerCAmelCase = getattr(snake_case , 'LayerNorm' ) elif m_name == "_key_dense": # attention key trace.append('key' ) _lowerCAmelCase = getattr(snake_case , 'key' ) elif m_name == "_query_dense": # attention query trace.append('query' ) _lowerCAmelCase = getattr(snake_case , 'query' ) elif m_name == "_value_dense": # attention value trace.append('value' ) _lowerCAmelCase = getattr(snake_case , 'value' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['intermediate', 'dense'] ) _lowerCAmelCase = getattr(snake_case , 'intermediate' ) _lowerCAmelCase = getattr(snake_case , 'dense' ) elif m_name == "_output_layer_norm": # output layer norm 
trace.append('output' ) _lowerCAmelCase = getattr(snake_case , 'output' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('bias' ) _lowerCAmelCase = getattr(snake_case , 'bias' ) elif m_name in ["kernel", "gamma"]: trace.append('weight' ) _lowerCAmelCase = getattr(snake_case , 'weight' ) else: logger.warning(F'Ignored {m_name}' ) # for certain layers reshape is necessary _lowerCAmelCase = '.'.join(snake_case ) if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , snake_case ) or re.match( R'(\S+)\.attention\.output\.dense\.weight' , snake_case ): _lowerCAmelCase = array.reshape(pointer.data.shape ) if "kernel" in full_name: _lowerCAmelCase = array.transpose() if pointer.shape == array.shape: _lowerCAmelCase = torch.from_numpy(snake_case ) else: raise ValueError( F'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:' F' {array.shape}' ) logger.info(F'Successfully set variable {full_name} to PyTorch layer {trace}' ) return model def _lowerCamelCase ( snake_case , snake_case , snake_case ): # Instantiate model logger.info(F'Loading model based on config from {config_path}...' ) _lowerCAmelCase = BertConfig.from_json_file(snake_case ) _lowerCAmelCase = BertModel(snake_case ) # Load weights from checkpoint logger.info(F'Loading weights from checkpoint {tf_checkpoint_path}...' ) load_tfa_weights_in_bert(snake_case , snake_case , snake_case ) # Save pytorch-model logger.info(F'Saving PyTorch model to {pytorch_dump_path}...' ) torch.save(model.state_dict() , snake_case ) if __name__ == "__main__": _lowercase: str = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model (must include filename).''', ) _lowercase: Optional[int] = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
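# Why the conversion above transposes "kernel" weights: TensorFlow dense
# layers store kernels as (in_features, out_features) while torch.nn.Linear
# stores (out_features, in_features). A minimal sketch of the rule:
import numpy as np
import torch

tf_kernel = np.random.rand(768, 3072).astype(np.float32)   # (in, out) in TF
linear = torch.nn.Linear(768, 3072)
assert linear.weight.shape == torch.Size([3072, 768])       # (out, in) in torch
linear.weight.data = torch.from_numpy(tf_kernel.transpose())  # shapes now match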
"""simple docstring""" import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Tuple = TaConfig.from_json_file(__UpperCamelCase ) print(F"Building PyTorch model from configuration: {config}" ) UpperCAmelCase__ : Any = TaForConditionalGeneration(__UpperCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_ta(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __UpperCAmelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
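# Unlike most configs, the class above overloads `num_hidden_layers` with a
# per-modality dict instead of a single int; a sketch of the mapping it builds
# from the depth arguments:
l_layers, x_layers, r_layers = 9, 5, 5
num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
assert num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}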
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    # error = hypothesis output - actual output for the given example
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    # linear hypothesis: theta_0 + sum_i theta_{i+1} * x_i
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    # actual output for the example from the requested data set
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    # hypothesis value for the example from the requested data set
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    # sum of errors (weighted by the index-th feature unless index == -1)
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
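# A worked instance of the hypothesis above, using the initial
# parameter_vector [2, 4, 1, 5] on the first training input (5, 2, 3):
#   theta_0 + theta_1*x_0 + theta_2*x_1 + theta_3*x_2 = 2 + 4*5 + 1*2 + 5*3 = 39,
# so the initial error on that example is 39 - 15 = 24.
assert 2 + 4 * 5 + 1 * 2 + 5 * 3 == 39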
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) a : List[str] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]: UpperCAmelCase : List[str] = state_dict.pop(_lowercase ) UpperCAmelCase : List[str] = val def __lowerCamelCase ( _lowercase ) -> Any: UpperCAmelCase : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) UpperCAmelCase : Dict = value else: UpperCAmelCase : List[Any] = value return new_state_dict def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Optional[int]: UpperCAmelCase : Dict = """""" if is_panoptic: UpperCAmelCase : Tuple = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : Dict = in_proj_weight[:2_5_6, :] UpperCAmelCase : Optional[Any] = in_proj_bias[:2_5_6] UpperCAmelCase : List[Any] = in_proj_weight[2_5_6:5_1_2, :] UpperCAmelCase : Tuple = in_proj_bias[2_5_6:5_1_2] UpperCAmelCase : List[str] = in_proj_weight[-2_5_6:, :] UpperCAmelCase : List[str] = in_proj_bias[-2_5_6:] def __lowerCamelCase ( ) -> Dict: UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Tuple = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase ) -> str: UpperCAmelCase : str = ConditionalDetrConfig() # set backbone 
and dilation attributes if "resnet101" in model_name: UpperCAmelCase : List[Any] = """resnet101""" if "dc5" in model_name: UpperCAmelCase : Optional[int] = True UpperCAmelCase : List[Any] = """panoptic""" in model_name if is_panoptic: UpperCAmelCase : Union[str, Any] = 2_5_0 else: UpperCAmelCase : int = 9_1 UpperCAmelCase : Tuple = """huggingface/label-files""" UpperCAmelCase : List[Any] = """coco-detection-id2label.json""" UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Optional[Any] = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # load image processor UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection""" UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase ) # prepare image UpperCAmelCase : Union[str, Any] = prepare_img() UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" ) UpperCAmelCase : List[Any] = encoding["""pixel_values"""] logger.info(F'''Converting model {model_name}...''' ) # load original model from torch hub UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval() UpperCAmelCase : List[Any] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCAmelCase : List[Any] = """conditional_detr.""" + src rename_key(_lowercase , _lowercase , _lowercase ) UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase ) # query, key and value matrices need special treatment read_in_q_k_v(_lowercase , is_panoptic=_lowercase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase ) UpperCAmelCase : int = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCAmelCase : Any = state_dict.pop(_lowercase ) UpperCAmelCase : Optional[Any] = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: UpperCAmelCase : List[Any] = state_dict.pop(_lowercase ) UpperCAmelCase : str = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase ) UpperCAmelCase : Union[str, Any] = val # finally, create HuggingFace model and load state dict UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase ) model.load_state_dict(_lowercase ) model.eval() model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase ) UpperCAmelCase : int = model(_lowercase ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 ) 
# Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) image_processor.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) a : Optional[Any] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
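# The q/k/v split performed in read_in_q_k_v above, in isolation: PyTorch's
# MultiheadAttention stores one stacked in_proj matrix of shape (3*d, d), and
# slicing its rows recovers the separate query/key/value projections
# (d = 256 for this model).
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)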
import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser SCREAMING_SNAKE_CASE : int = re.compile(R"\s+") def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ): return {"hash": hashlib.mda(re.sub(_SCREAMING_SNAKE_CASE , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] ): UpperCamelCase_ : int = [len(_SCREAMING_SNAKE_CASE ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(_SCREAMING_SNAKE_CASE ), "line_max": max(_SCREAMING_SNAKE_CASE )} def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[Any] ): UpperCamelCase_ : Union[str, Any] = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ): if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any]=5 ): UpperCamelCase_ : Dict = ["""auto-generated""", """autogenerated""", """automatically generated"""] UpperCamelCase_ : int = example["""content"""].splitlines() for _, line in zip(range(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int]=5 , _SCREAMING_SNAKE_CASE : int=0.05 ): UpperCamelCase_ : Optional[int] = ["""unit tests""", """test file""", """configuration file"""] UpperCamelCase_ : Dict = example["""content"""].splitlines() UpperCamelCase_ : Tuple = 0 UpperCamelCase_ : List[Any] = 0 # first test for _, line in zip(range(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test UpperCamelCase_ : Tuple = example["""content"""].count("""\n""" ) UpperCamelCase_ : Dict = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] ): UpperCamelCase_ : Optional[int] = ["""def """, """class """, """for """, """while """] UpperCamelCase_ : str = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str]=4 ): UpperCamelCase_ : str = example["""content"""].splitlines() UpperCamelCase_ : Optional[Any] = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : List[Any] ): UpperCamelCase_ : Optional[Any] = tokenizer(example["""content"""] , truncation=_SCREAMING_SNAKE_CASE )["""input_ids"""] UpperCamelCase_ : Any = len(example["""content"""] ) / len(_SCREAMING_SNAKE_CASE ) return {"ratio": ratio} def 
lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : List[str] ): UpperCamelCase_ : Tuple = {} results.update(get_hash(_SCREAMING_SNAKE_CASE ) ) results.update(line_stats(_SCREAMING_SNAKE_CASE ) ) results.update(alpha_stats(_SCREAMING_SNAKE_CASE ) ) results.update(char_token_ratio(_SCREAMING_SNAKE_CASE ) ) results.update(is_autogenerated(_SCREAMING_SNAKE_CASE ) ) results.update(is_config_or_test(_SCREAMING_SNAKE_CASE ) ) results.update(has_no_keywords(_SCREAMING_SNAKE_CASE ) ) results.update(has_few_assignments(_SCREAMING_SNAKE_CASE ) ) return results def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict ): if not check_uniques(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[Any] ): with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f_in: with gzip.open(str(_SCREAMING_SNAKE_CASE ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) os.unlink(_SCREAMING_SNAKE_CASE ) # Settings SCREAMING_SNAKE_CASE : Any = HfArgumentParser(PreprocessingArguments) SCREAMING_SNAKE_CASE : Any = parser.parse_args() if args.num_workers is None: SCREAMING_SNAKE_CASE : Dict = multiprocessing.cpu_count() SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset SCREAMING_SNAKE_CASE : int = time.time() SCREAMING_SNAKE_CASE : Tuple = load_dataset(args.dataset_name, split="train") print(F'''Time to load dataset: {time.time()-t_start:.2f}''') # Run preprocessing SCREAMING_SNAKE_CASE : str = time.time() SCREAMING_SNAKE_CASE : Optional[Any] = ds.map(preprocess, num_proc=args.num_workers) print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''') # Deduplicate hashes SCREAMING_SNAKE_CASE : Tuple = set(ds.unique("hash")) SCREAMING_SNAKE_CASE : Any = len(uniques) / len(ds) print(F'''Fraction of duplicates: {1-frac:.2%}''') # Deduplicate data and apply heuristics SCREAMING_SNAKE_CASE : int = time.time() SCREAMING_SNAKE_CASE : List[str] = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args}) print(F'''Time to filter dataset: {time.time()-t_start:.2f}''') print(F'''Size of filtered dataset: {len(ds_filter)}''') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: SCREAMING_SNAKE_CASE : Optional[int] = time.time() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''') print(F'''Size of deduplicate dataset: {len(ds_filter)}''') # Save data in batches of samples_per_file SCREAMING_SNAKE_CASE : List[str] = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / "duplicate_clusters.json", "w") as f: json.dump(duplicate_clusters, f) SCREAMING_SNAKE_CASE : Union[str, Any] = output_dir / "data" 
data_dir.mkdir(exist_ok=True) SCREAMING_SNAKE_CASE : Dict = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): SCREAMING_SNAKE_CASE : str = str(data_dir / F'''file-{file_number+1:012}.json''') SCREAMING_SNAKE_CASE : str = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
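# The exact-deduplication idea used above, in miniature: hash each document's
# content and keep only the first occurrence of each hash.
import hashlib

docs = ["print(1)", "print(2)", "print(1)"]
seen, unique_docs = set(), []
for doc in docs:
    h = hashlib.md5(doc.encode("utf-8")).hexdigest()
    if h not in seen:
        seen.add(h)
        unique_docs.append(doc)
assert unique_docs == ["print(1)", "print(2)"]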
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase ( __a ): def __init__(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Tuple: super().__init__() if safety_checker is None: logger.warning( f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def A_ (self , __UpperCamelCase = "auto" ) -> List[str]: if slice_size == "auto": UpperCamelCase_ : Any = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def A_ (self ) -> Any: self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__(self , __UpperCamelCase , __UpperCamelCase=16_000 , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , **__UpperCamelCase , ) -> Optional[int]: UpperCamelCase_ : str = self.speech_processor.feature_extractor( __UpperCamelCase , return_tensors="""pt""" , sampling_rate=__UpperCamelCase ).input_features.to(self.device ) UpperCamelCase_ : List[Any] = self.speech_model.generate(__UpperCamelCase , max_length=480_000 ) UpperCamelCase_ : List[Any] = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[ 0 ] if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ : List[Any] = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ : Optional[int] = len(__UpperCamelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , 
__UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__UpperCamelCase )}.''' ) # get prompt text embeddings UpperCamelCase_ : List[Any] = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCamelCase_ : Dict = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase_ : List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCamelCase_ : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length] UpperCamelCase_ : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCamelCase_,UpperCamelCase_,UpperCamelCase_ : Any = text_embeddings.shape UpperCamelCase_ : Union[str, Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) UpperCamelCase_ : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCamelCase_ : List[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCamelCase_ : List[str] if negative_prompt is None: UpperCamelCase_ : Optional[Any] = [""""""] * batch_size elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=''' f''' {type(__UpperCamelCase )}.''' ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ : Any = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCamelCase_ : Optional[int] = negative_prompt UpperCamelCase_ : List[Any] = text_input_ids.shape[-1] UpperCamelCase_ : Any = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , ) UpperCamelCase_ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase_ : List[str] = uncond_embeddings.shape[1] UpperCamelCase_ : List[str] = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 ) UpperCamelCase_ : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase_ : str = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. UpperCamelCase_ : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCamelCase_ : Optional[int] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCamelCase_ : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to( self.device ) else: UpperCamelCase_ : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCamelCase_ : Optional[int] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCamelCase_ : Any = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase_ : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase_ : Optional[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase_ : Union[str, Any] = {} if accepts_eta: UpperCamelCase_ : Any = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase_ : int = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual UpperCamelCase_ : Tuple = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: UpperCamelCase_,UpperCamelCase_ : Union[str, Any] = noise_pred.chunk(2 ) UpperCamelCase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase_ : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) UpperCamelCase_ : List[str] = 1 / 0.18_215 * latents UpperCamelCase_ : List[Any] = self.vae.decode(__UpperCamelCase ).sample UpperCamelCase_ : Dict = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCamelCase_ : List[Any] = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
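# The classifier-free guidance update used in the denoising loop above, shown
# on toy tensors: the guided prediction extrapolates from the unconditional
# output toward the text-conditioned one.
import torch

guidance_scale = 7.5
noise_pred_uncond = torch.zeros(1, 4)
noise_pred_text = torch.ones(1, 4)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.equal(noise_pred, torch.full((1, 4), 7.5))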
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Optional[int] = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _UpperCAmelCase : Optional[int] = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", }, "tokenizer_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json", }, } _UpperCAmelCase : Dict = { "xlnet-base-cased": None, "xlnet-large-cased": None, } _UpperCAmelCase : int = "▁" # Segments (not really needed) _UpperCAmelCase : Dict = 0 _UpperCAmelCase : Optional[int] = 1 _UpperCAmelCase : Tuple = 2 _UpperCAmelCase : List[str] = 3 _UpperCAmelCase : Optional[Any] = 4 class __magic_name__ ( UpperCamelCase_ ): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = """left""" UpperCamelCase__ = XLNetTokenizer def __init__( self , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=True , snake_case_=False , snake_case_="<s>" , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<sep>" , snake_case_="<pad>" , snake_case_="<cls>" , snake_case_="<mask>" , snake_case_=["<eop>", "<eod>"] , **snake_case_ , ): lowercase =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , ) lowercase =3 lowercase =do_lower_case lowercase =remove_space lowercase =keep_accents lowercase =vocab_file lowercase =False if not self.vocab_file else True def _A( self , snake_case_ , snake_case_ = None ): lowercase =[self.sep_token_id] lowercase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _A( self , snake_case_ , snake_case_ = None ): lowercase =[self.sep_token_id] lowercase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _A( self , snake_case_ , snake_case_ = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase =os.path.join( _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
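# The two helpers above build XLNet inputs by APPENDING the special tokens
# (XLNet puts <sep> and <cls> at the end, unlike BERT). A toy sketch of the
# layouts, with made-up token ids (sep=4, cls=3 here are illustrative only):
tokens_a, tokens_b, sep, cls = [10, 11], [20], [4], [3]
assert tokens_a + sep + cls == [10, 11, 4, 3]                       # single sequence
assert tokens_a + sep + tokens_b + sep + cls == [10, 11, 4, 20, 4, 3]  # sequence pair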
712
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # the noisy inputs and the model predictions should match between schedulers
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
145
0
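# Quick illustration of the XLNet segment-id layout built by
# create_token_type_ids_from_sequences above: a <sep> follows each segment and
# a single <cls> (segment id 2) sits at the very end. This helper is a
# hypothetical pure-Python sketch, not part of the tokenizer API.
def xlnet_token_type_ids(len_a, len_b=None, cls_segment_id=2):
    if len_b is None:
        return [0] * (len_a + 1) + [cls_segment_id]
    return [0] * (len_a + 1) + [1] * (len_b + 1) + [cls_segment_id]

print(xlnet_token_type_ids(3))     # [0, 0, 0, 0, 2]
print(xlnet_token_type_ids(2, 2))  # [0, 0, 0, 1, 1, 1, 2]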
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
133
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda comment: comment.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # close issues that stayed inactive for 7 days after the bot's stale comment
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
280
0
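# The same four filename hygiene checks as the script above, collected into a
# single hypothetical helper so they can be exercised on an arbitrary list.
import os

def find_bad_paths(filepaths):
    return {
        "uppercase": [f for f in filepaths if f != f.lower()],
        "space": [f for f in filepaths if " " in f],
        "hyphen": [f for f in filepaths if "-" in f],
        "no_directory": [f for f in filepaths if os.sep not in f],
    }

demo = ["maths/prime_check.py", "Maths/fibonacci.py", "top level.py", "loose_file.py"]
for reason, files in find_bad_paths(demo).items():
    if files:
        print(f"{reason}: {files}")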
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): def lowercase ( self: int ) -> str: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase ( self: Optional[int] ) -> str: """simple docstring""" UpperCamelCase_ = 1 UpperCamelCase_ = 3 UpperCamelCase_ = (32, 32) UpperCamelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE ) return image @property def lowercase ( self: List[str] ) -> Dict: """simple docstring""" torch.manual_seed(0 ) UpperCamelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def lowercase ( self: List[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) UpperCamelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def lowercase ( self: Tuple ) -> Dict: """simple docstring""" torch.manual_seed(0 ) UpperCamelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_SCREAMING_SNAKE_CASE ) @property def lowercase ( self: int ) -> List[Any]: """simple docstring""" def extract(*_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Any ): class _UpperCamelCase : def __init__( self: List[Any] ) -> List[Any]: """simple docstring""" UpperCamelCase_ = torch.ones([0] ) def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Dict: """simple docstring""" self.pixel_values.to(_SCREAMING_SNAKE_CASE ) return self return Out() return extract def lowercase ( self: Union[str, Any] ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase_ = self.dummy_cond_unet UpperCamelCase_ = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , ) UpperCamelCase_ = self.dummy_vae UpperCamelCase_ = self.dummy_text_encoder UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk UpperCamelCase_ = StableDiffusionPipeline( unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , ) UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = "A painting of a squirrel 
eating a burger" UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) UpperCamelCase_ = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) UpperCamelCase_ = output.images UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) UpperCamelCase_ = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0] UpperCamelCase_ = image[0, -3:, -3:, -1] UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase_ = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase ( self: Any ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase_ = self.dummy_cond_unet UpperCamelCase_ = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = self.dummy_vae UpperCamelCase_ = self.dummy_text_encoder UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk UpperCamelCase_ = StableDiffusionPipeline( unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , ) UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = "A painting of a squirrel eating a burger" UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) UpperCamelCase_ = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) UpperCamelCase_ = output.images UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) UpperCamelCase_ = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0] UpperCamelCase_ = image[0, -3:, -3:, -1] UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase_ = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase ( self: Optional[int] ) -> List[Any]: """simple docstring""" UpperCamelCase_ = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert isinstance(pipe.scheduler , _SCREAMING_SNAKE_CASE ) assert pipe.safety_checker is None UpperCamelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE ) # sanity check that the pipeline still works assert 
pipe.safety_checker is None UpperCamelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowercase ( self: List[Any] ) -> Dict: """simple docstring""" UpperCamelCase_ = self.dummy_cond_unet UpperCamelCase_ = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = self.dummy_vae UpperCamelCase_ = self.dummy_text_encoder UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 UpperCamelCase_ = unet.half() UpperCamelCase_ = vae.half() UpperCamelCase_ = bert.half() # make sure here that pndm scheduler skips prk UpperCamelCase_ = StableDiffusionPipeline( unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , ) UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = "A painting of a squirrel eating a burger" UpperCamelCase_ = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): def lowercase ( self: List[Any] ) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase ( self: Any ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) UpperCamelCase_ = 4003660346 UpperCamelCase_ = 7 # without safety guidance (sld_guidance_scale = 0) UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] UpperCamelCase_ = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] UpperCamelCase_ = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19] assert image.shape == (1, 512, 512, 3) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase ( self: Optional[int] ) -> str: """simple docstring""" UpperCamelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = "padme amidala taking a bath artwork, safe for work, no nudity" UpperCamelCase_ = 2734971755 UpperCamelCase_ = 7 UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] UpperCamelCase_ = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] UpperCamelCase_ = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase ( self: List[Any] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." " leyendecker" ) UpperCamelCase_ = 1044355234 UpperCamelCase_ = 12 UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] UpperCamelCase_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] UpperCamelCase_ = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
712
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = '▁' _UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'} _UpperCAmelCase = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model' ), } } _UpperCAmelCase = { 'facebook/nllb-200-distilled-600M': 1_0_2_4, } # fmt: off _UpperCAmelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class _UpperCamelCase ( lowerCAmelCase_ ): _UpperCamelCase : Dict = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : List[int] = [] _UpperCamelCase : List[int] = [] def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]="<s>" , _SCREAMING_SNAKE_CASE: Optional[int]="</s>" , 
_SCREAMING_SNAKE_CASE: int="</s>" , _SCREAMING_SNAKE_CASE: Union[str, Any]="<s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: Union[str, Any]="<pad>" , _SCREAMING_SNAKE_CASE: int="<mask>" , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Tuple=False , **_SCREAMING_SNAKE_CASE: List[str] , ) -> Tuple: """simple docstring""" UpperCamelCase_ = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs UpperCamelCase_ = legacy_behaviour super().__init__( bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase_ = 1 UpperCamelCase_ = len(self.sp_model ) UpperCamelCase_ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_SCREAMING_SNAKE_CASE ) } UpperCamelCase_ = {v: k for k, v in self.lang_code_to_id.items()} UpperCamelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) UpperCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} UpperCamelCase_ = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) UpperCamelCase_ = src_lang if src_lang is not None else "eng_Latn" UpperCamelCase_ = self.lang_code_to_id[self._src_lang] UpperCamelCase_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self: Any ) -> Union[str, Any]: """simple docstring""" UpperCamelCase_ = self.__dict__.copy() UpperCamelCase_ = None UpperCamelCase_ = self.sp_model.serialized_model_proto() return state def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Tuple: """simple docstring""" UpperCamelCase_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCamelCase_ = {} UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowercase ( self: Union[str, Any] ) -> Dict: """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowercase ( self: Union[str, Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> None: """simple docstring""" UpperCamelCase_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = [1] * len(self.prefix_tokens ) UpperCamelCase_ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[int]: """simple docstring""" UpperCamelCase_ = [self.sep_token_id] UpperCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] , _SCREAMING_SNAKE_CASE: Optional[str] , **_SCREAMING_SNAKE_CASE: Tuple ) -> int: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCamelCase_ = src_lang UpperCamelCase_ = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = tgt_lang_id return inputs def lowercase ( self: Tuple ) -> Union[str, Any]: """simple 
docstring""" UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE ) def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: str ) -> Optional[int]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase_ = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ = "".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , " " ).strip() return out_string def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase_ = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi: UpperCamelCase_ = self.sp_model.serialized_model_proto() fi.write(_SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str = "eng_Latn" , _SCREAMING_SNAKE_CASE: Optional[List[str]] = None , _SCREAMING_SNAKE_CASE: str = "fra_Latn" , **_SCREAMING_SNAKE_CASE: List[str] , ) -> BatchEncoding: """simple docstring""" UpperCamelCase_ = src_lang UpperCamelCase_ = tgt_lang return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def lowercase ( self: Any ) -> Optional[int]: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def lowercase ( self: Dict ) -> Optional[int]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Any ) -> None: """simple docstring""" UpperCamelCase_ = self.lang_code_to_id[src_lang] if self.legacy_behaviour: UpperCamelCase_ = [] UpperCamelCase_ = [self.eos_token_id, self.cur_lang_code] else: UpperCamelCase_ = [self.cur_lang_code] UpperCamelCase_ = [self.eos_token_id] def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str ) -> None: """simple docstring""" UpperCamelCase_ = self.lang_code_to_id[lang] if self.legacy_behaviour: UpperCamelCase_ = [] UpperCamelCase_ = [self.eos_token_id, self.cur_lang_code] else: UpperCamelCase_ = [self.cur_lang_code] UpperCamelCase_ = [self.eos_token_id]
371
0
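# Sketch of the fairseq/SentencePiece id alignment handled by the NLLB
# tokenizer above: SentencePiece returns 0 for unknown pieces, which must map
# to fairseq's <unk> (id 3); every other piece id is shifted by the offset so
# <s>/<pad>/</s>/<unk> keep positions 0-3. The helper itself is hypothetical.
def spm_to_fairseq_id(spm_piece_id, fairseq_offset=1, unk_token_id=3):
    return spm_piece_id + fairseq_offset if spm_piece_id else unk_token_id

print(spm_to_fairseq_id(0))  # 3 (<unk>)
print(spm_to_fairseq_id(5))  # 6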
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
100
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowercase_ = logging.getLogger(__name__) def lowerCAmelCase (__A , __A): """simple docstring""" if os.path.exists(__A): if os.path.exists(os.path.join(__A , '''config.json''')) and os.path.isfile( os.path.join(__A , '''config.json''')): os.remove(os.path.join(__A , '''config.json''')) if os.path.exists(os.path.join(__A , '''pytorch_model.bin''')) and os.path.isfile( os.path.join(__A , '''pytorch_model.bin''')): os.remove(os.path.join(__A , '''pytorch_model.bin''')) else: os.makedirs(__A) model.save_pretrained(__A) def lowerCAmelCase (__A , __A=False): """simple docstring""" _a = 2 if unlogit: _a = torch.pow(__A , __A) _a = p * torch.log(__A) _a = 0 return -plogp.sum(dim=-1) def lowerCAmelCase (__A): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(F'''{x + 1}''' for x in range(len(__A)))) for row in range(len(__A)): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:.5f}''' for x in tensor[row].cpu().data)) else: logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:d}''' for x in tensor[row].cpu().data)) def lowerCAmelCase (__A , __A , __A , __A=True , __A=True , __A=None , __A=False): """simple docstring""" _a , _a = model.config.num_hidden_layers, model.config.num_attention_heads _a = torch.zeros(__A , __A).to(args.device) _a = torch.zeros(__A , __A).to(args.device) if head_mask is None: _a = torch.ones(__A , __A).to(args.device) head_mask.requires_grad_(requires_grad=__A) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _a = None _a = 0.0 _a = 0.0 for step, inputs in enumerate(tqdm(__A , desc='''Iteration''' , disable=args.local_rank not in [-1, 0])): _a = tuple(t.to(args.device) for t in inputs) ((_a) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _a = model(__A , labels=__A , head_mask=__A) # (loss), lm_logits, presents, (all hidden_states), (attentions) _a , _a , _a = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__A): _a = entropy(attn.detach() , __A) attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__A).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _a = 2 _a = torch.pow(torch.pow(__A , __A).sum(-1) , 1 / exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: _a = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''') print_ad_tensor(__A) if compute_importance: logger.info('''Head importance scores''') print_ad_tensor(__A) logger.info('''Head ranked by importance scores''') _a = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device) _a = torch.arange( 
head_importance.numel() , device=args.device) _a = head_ranks.view_as(__A) print_ad_tensor(__A) return attn_entropy, head_importance, total_loss def lowerCAmelCase (__A , __A , __A): """simple docstring""" _a , _a , _a = compute_heads_importance(__A , __A , __A , compute_entropy=__A) _a = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , __A , original_score * args.masking_threshold) _a = torch.ones_like(__A) _a = max(1 , int(new_head_mask.numel() * args.masking_amount)) _a = original_score while current_score >= original_score * args.masking_threshold: _a = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _a = float('''Inf''') _a = head_importance.view(-1).sort()[1] if len(__A) <= num_to_mask: print('''BREAK BY num_to_mask''') break # mask heads _a = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist())) _a = new_head_mask.view(-1) _a = 0.0 _a = new_head_mask.view_as(__A) _a = new_head_mask.clone().detach() print_ad_tensor(__A) # Compute metric and head importance again _a , _a , _a = compute_heads_importance( __A , __A , __A , compute_entropy=__A , head_mask=__A) _a = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''') print_ad_tensor(__A) np.save(os.path.join(args.output_dir , '''head_mask.npy''') , head_mask.detach().cpu().numpy()) return head_mask def lowerCAmelCase (__A , __A , __A , __A): """simple docstring""" _a = datetime.now() _a , _a , _a = compute_heads_importance( __A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A) _a = 1 / loss _a = datetime.now() - before_time _a = sum(p.numel() for p in model.parameters()) _a = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__A)) } for k, v in heads_to_prune.items(): if isinstance(__A , __A): _a = [ v, ] assert sum(len(__A) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() model.prune_heads(__A) _a = sum(p.numel() for p in model.parameters()) _a = datetime.now() _a , _a , _a = compute_heads_importance( __A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A , actually_pruned=__A , ) _a = 1 / loss _a = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __A , __A , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __A , __A) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100) save_model(__A , args.output_dir) def lowerCAmelCase (): """simple docstring""" _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=__A , type=__A , required=__A , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=__A , type=__A , required=__A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=__A , type=__A , required=__A , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=__A , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=__A , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=__A , type=__A , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=__A , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''') parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''') parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''') parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''') parser.add_argument( '''--masking_threshold''' , default=0.9 , type=__A , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=__A , help='''Amount to heads to masking at each masking step.''') parser.add_argument('''--metric_name''' , default='''acc''' , type=__A , help='''Metric to use for head masking.''') parser.add_argument( '''--max_seq_length''' , default=128 , type=__A , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=__A , help='''Batch size.''') parser.add_argument('''--seed''' , type=__A , default=42) parser.add_argument('''--local_rank''' , type=__A , default=-1 , help='''local_rank for distributed training on gpus''') parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''') parser.add_argument('''--server_ip''' , type=__A , default='''''' , help='''Can be used for distant debugging.''') parser.add_argument('''--server_port''' , type=__A , default='''''' , help='''Can be used for distant debugging.''') _a = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''') ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__A) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _a = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''') _a = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) _a = torch.device('''cuda''' , args.local_rank) _a = 1 torch.distributed.init_process_group(backend='''nccl''') # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1))) _a = GPTaLMHeadModel.from_pretrained(args.model_name_or_path) # Distributed and parallel training model.to(args.device) if args.local_rank != -1: _a = nn.parallel.DistributedDataParallel( __A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__A) elif args.n_gpu > 1: _a = nn.DataParallel(__A) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__A) torch.save(__A , os.path.join(args.output_dir , '''run_args.bin''')) logger.info('''Training/evaluation parameters %s''' , __A) # Prepare dataset _a = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa), ]) _a = (torch.from_numpy(__A),) _a = TensorDataset(*__A) _a = RandomSampler(__A) _a = DataLoader(__A , sampler=__A , batch_size=args.batch_size) # Compute head entropy and importance score compute_heads_importance(__A , __A , __A) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _a = mask_heads(__A , __A , __A) prune_heads(__A , __A , __A , __A) if __name__ == "__main__": main()
11
0
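# Iterative DP equivalent of the mutually recursive coin-counting functions
# above: ways[v] counts the combinations that make v pence with the coins seen
# so far. Faster than the plain recursion and easy to sanity-check.
def count_coin_combinations(target=200, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    ways = [1] + [0] * target
    for coin in coins:
        for value in range(coin, target + 1):
            ways[value] += ways[value - coin]
    return ways[target]

print(count_coin_combinations())  # 73682, matching solution(200) above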
"""simple docstring""" import argparse import os import re import packaging.version _UpperCamelCase : Union[str, Any] = "examples/" _UpperCamelCase : Any = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } _UpperCamelCase : List[Any] = { "init": "src/transformers/__init__.py", "setup": "setup.py", } _UpperCamelCase : Tuple = "README.md" def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ): '''simple docstring''' with open(__UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowercase__ : List[Any] = f.read() lowercase__ , lowercase__ : int = REPLACE_PATTERNS[pattern] lowercase__ : Optional[int] = replace.replace('VERSION' , __UpperCamelCase ) lowercase__ : Tuple = re_pattern.sub(__UpperCamelCase , __UpperCamelCase ) with open(__UpperCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(__UpperCamelCase ) def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' for folder, directories, fnames in os.walk(__UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern='examples' ) def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=False ): '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if not patch: update_version_in_examples(__UpperCamelCase ) def a_ ( ): '''simple docstring''' lowercase__ : int = '🤗 Transformers currently provides the following architectures' lowercase__ : Tuple = '1. Want to contribute a new model?' with open(__UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowercase__ : List[Any] = f.readlines() # Find the start of the list. lowercase__ : Optional[int] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowercase__ : Optional[Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): lowercase__ : Optional[int] = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(__UpperCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(__UpperCamelCase ) def a_ ( ): '''simple docstring''' with open(REPLACE_FILES['init'] , 'r' ) as f: lowercase__ : Tuple = f.read() lowercase__ : Optional[int] = REPLACE_PATTERNS['init'][0].search(__UpperCamelCase ).groups()[0] return packaging.version.parse(__UpperCamelCase ) def a_ ( _lowerCAmelCase : Union[str, Any]=False ): '''simple docstring''' lowercase__ : int = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' 
) if default_version.is_devrelease: lowercase__ : Union[str, Any] = default_version.base_version elif patch: lowercase__ : List[str] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: lowercase__ : Dict = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. lowercase__ : Tuple = input(f"""Which version are you releasing? [{default_version}]""" ) if len(__UpperCamelCase ) == 0: lowercase__ : int = default_version print(f"""Updating version to {version}.""" ) global_version_update(__UpperCamelCase , patch=__UpperCamelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = get_version() lowercase__ : Union[str, Any] = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" lowercase__ : Tuple = current_version.base_version # Check with the user we got that right. lowercase__ : Optional[int] = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(__UpperCamelCase ) == 0: lowercase__ : List[Any] = dev_version print(f"""Updating version to {version}.""" ) global_version_update(__UpperCamelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": _UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") _UpperCamelCase : Union[str, Any] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
713
"""simple docstring""" from __future__ import annotations def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ): '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif stress < 0: raise ValueError('Stress cannot be negative' ) elif tangential_force < 0: raise ValueError('Tangential Force cannot be negative' ) elif area < 0: raise ValueError('Area cannot be negative' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
645
0
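# Usage sketch for the shear-stress helper above (illustrative values only),
# assuming the repaired `shear_stress` signature: stress = force / area, so
# the argument left at zero is the one that gets solved for.
print(shear_stress(stress=25, tangential_force=100, area=0))     # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))   # ('stress', 8.0)
print(shear_stress(stress=1000, tangential_force=0, area=1200))  # ('tangential_force', 1200000)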
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
442
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) __lowercase : Any = logging.getLogger(__name__) @dataclass(frozen=snake_case ) class _A : '''simple docstring''' __lowerCamelCase : str __lowerCamelCase : str __lowerCamelCase : Optional[str] = None __lowerCamelCase : Optional[str] = None __lowerCamelCase : Optional[str] = None @dataclass(frozen=snake_case ) class _A : '''simple docstring''' __lowerCamelCase : List[int] __lowerCamelCase : Optional[List[int]] = None __lowerCamelCase : Optional[List[int]] = None __lowerCamelCase : Optional[Union[int, float]] = None __lowerCamelCase : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class _A ( snake_case ): '''simple docstring''' __lowerCamelCase : List[InputFeatures] def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,): '''simple docstring''' snake_case : str = hans_processors[task]() snake_case : str = os.path.join( SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format( """dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,) snake_case : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) snake_case , snake_case : List[Any] = label_list[2], label_list[1] snake_case : List[Any] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
snake_case : Any = cached_features_file + """.lock""" with FileLock(SCREAMING_SNAKE_CASE_ ): if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache: logger.info(F"""Loading features from cached file {cached_features_file}""" ) snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ ) else: logger.info(F"""Creating features from dataset file at {data_dir}""" ) snake_case : Union[str, Any] = ( processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ ) ) logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) ) snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ ) torch.save(self.features ,SCREAMING_SNAKE_CASE_ ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return self.features[i] def snake_case_ ( self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _A : '''simple docstring''' __lowerCamelCase : List[InputFeatures] def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,): '''simple docstring''' snake_case : Any = hans_processors[task]() snake_case : List[str] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) snake_case , snake_case : int = label_list[2], label_list[1] snake_case : List[str] = label_list snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ ) snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ): if ex_index % 10000 == 0: logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) snake_case : Any = tf.data.Dataset.from_generator( SCREAMING_SNAKE_CASE_ ,( { """example_id""": tf.intaa, """input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa, }, tf.intaa, ) ,( { """example_id""": tf.TensorShape([] ), """input_ids""": tf.TensorShape([None, None] ), """attention_mask""": tf.TensorShape([None, None] ), """token_type_ids""": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) ,) def snake_case_ ( self ): '''simple docstring''' return self.dataset def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return self.features[i] def snake_case_ ( self ): '''simple docstring''' return self.label_list class _A ( snake_case ): '''simple docstring''' def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return 
self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" ) def snake_case_ ( self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : List[str] = [] for i, line in enumerate(SCREAMING_SNAKE_CASE_ ): if i == 0: continue snake_case : Any = """%s-%s""" % (set_type, line[0]) snake_case : Optional[int] = line[5] snake_case : Union[str, Any] = line[6] snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7] snake_case : Dict = line[0] examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) ) return examples def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple: '''simple docstring''' snake_case : List[Any] = {label: i for i, label in enumerate(__A )} snake_case : Union[str, Any] = [] for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ): if ex_index % 1_0000 == 0: logger.info("""Writing example %d""" % (ex_index) ) snake_case : Union[str, Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , ) snake_case : Tuple = label_map[example.label] if example.label in label_map else 0 snake_case : Tuple = int(example.pairID ) features.append(InputFeatures(**__A , label=__A , pairID=__A ) ) for i, example in enumerate(examples[:5] ): logger.info("""*** Example ***""" ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features __lowercase : Dict = { '''hans''': 3, } __lowercase : Union[str, Any] = { '''hans''': HansProcessor, }
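# A hedged usage sketch for the HANS helpers above, assuming the PyTorch
# dataset class is exported as `HansDataset` and that `data_dir` contains the
# heuristics_*_set.txt files; both names are assumptions for illustration.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dataset = HansDataset(
    data_dir="./hans",        # assumed location of heuristics_*_set.txt
    tokenizer=tokenizer,
    task="hans",
    max_seq_length=128,
    evaluate=True,            # loads heuristics_evaluation_set.txt
)
print(len(dataset), dataset.get_labels())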
36
0
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : List[str] = RoCBertTokenizer __magic_name__ : List[Any] = None __magic_name__ : Dict = False __magic_name__ : Optional[Any] = True __magic_name__ : List[str] = filter_non_english def _UpperCAmelCase ( self : int ): super().setUp() A__ : List[str] =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] A__ : List[str] ={} A__ : List[Any] ={} for i, value in enumerate(UpperCamelCase__ ): A__ : Dict =i A__ : Optional[int] =i A__ : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) A__ : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : int =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A__ : int =tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(UpperCamelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] ) def _UpperCAmelCase ( self : Dict ): A__ : str =RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _UpperCAmelCase ( self : int ): A__ : Dict =RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _UpperCAmelCase ( self : List[Any] ): A__ : Dict =RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _UpperCAmelCase ( self : Union[str, Any] ): A__ : str =RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _UpperCAmelCase ( self : Any ): A__ : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _UpperCAmelCase ( self : Tuple ): A__ : int =RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _UpperCAmelCase ( self : List[Any] ): A__ : Dict =RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _UpperCAmelCase ( self : Dict ): A__ : int =RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : Any =RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _UpperCAmelCase ( self : List[str] ): A__ : Dict =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] A__ : Union[str, Any] ={} for i, token in enumerate(UpperCamelCase__ ): A__ : Any =i A__ : Any =RoCBertWordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _UpperCAmelCase ( self : Union[str, Any] ): self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _UpperCAmelCase ( self : List[str] ): self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _UpperCAmelCase ( self : int ): self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : Tuple =self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCamelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: A__ : List[Any] =self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCamelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def _UpperCAmelCase ( self : Optional[int] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ : List[Any] =self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) A__ : List[str] =F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' A__ : List[str] =tokenizer_r.encode_plus( UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , ) A__ : List[str] =tokenizer_r.do_lower_case if hasattr(UpperCamelCase__ , "do_lower_case" ) else False A__ : Dict =( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _UpperCAmelCase ( self : List[Any] ): A__ : int =["的", "人", "有"] A__ : List[str] ="".join(UpperCamelCase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ : List[Any] =True A__ : Optional[Any] =self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) A__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) A__ : Any =tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) A__ : Any =tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) A__ : str =tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ ) A__ : Tuple =tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) A__ : Any =False A__ : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) A__ : List[str] =self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) A__ : int =tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) A__ : Tuple =tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) A__ : str =tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ ) A__ : Union[str, Any] =tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ ) # it 
is expected that only the first Chinese character is not preceded by "##". A__ : Tuple =[ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCamelCase__ ) ] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @slow def _UpperCAmelCase ( self : List[str] ): A__ : str =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A__ : Optional[int] =tokenizer.encode("你好" , add_special_tokens=UpperCamelCase__ ) A__ : Tuple =tokenizer.encode("你是谁" , add_special_tokens=UpperCamelCase__ ) A__ : str =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ ) A__ : str =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def _UpperCAmelCase ( self : int ): A__ : Union[str, Any] =self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): A__ : List[str] ="你好,你是谁" A__ : Optional[int] =tokenizer.tokenize(UpperCamelCase__ ) A__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) A__ : List[Any] =tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__ ) A__ : str =tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__ ) A__ : List[Any] =tokenizer.prepare_for_model( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) A__ : Union[str, Any] =tokenizer.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
720
"""simple docstring""" from collections import defaultdict def lowercase ( UpperCamelCase : int ): """simple docstring""" A__ : Union[str, Any] =1 A__ : int =True for v in tree[start]: if v not in visited: ret += dfs(UpperCamelCase ) if ret % 2 == 0: cuts.append(UpperCamelCase ) return ret def lowercase ( ): """simple docstring""" dfs(1 ) if __name__ == "__main__": __A , __A : List[str] = 10, 9 __A : Dict = defaultdict(list) __A : dict[int, bool] = {} __A : list[int] = [] __A : List[str] = 0 __A : str = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
595
0
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
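# A minimal usage sketch for the pipeline above via the high-level factory.
# The checkpoint and image URL are plausible examples, not requirements.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # two cats on a couch
    top_k=3,
)
for pred in preds:
    print(f"{pred['label']}: {pred['score']:.3f}")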
1
"""simple docstring""" import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ): @register_to_config def __init__( self : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : bool = False , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[int] = nn.Embedding(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[Any] = nn.Embedding(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[int] = nn.Dropout(p=snake_case__ ) UpperCAmelCase__ : Optional[int] = TaConfig( vocab_size=snake_case__ , d_model=snake_case__ , num_heads=snake_case__ , d_kv=snake_case__ , d_ff=snake_case__ , dropout_rate=snake_case__ , feed_forward_proj=snake_case__ , is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , ) UpperCAmelCase__ : Tuple = nn.ModuleList() for lyr_num in range(snake_case__ ): UpperCAmelCase__ : Tuple = TaBlock(snake_case__ ) self.encoders.append(snake_case__ ) UpperCAmelCase__ : str = TaLayerNorm(snake_case__ ) UpperCAmelCase__ : Tuple = nn.Dropout(p=snake_case__ ) def __a ( self : int , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.token_embedder(snake_case__ ) UpperCAmelCase__ : Optional[int] = encoder_input_tokens.shape[1] UpperCAmelCase__ : List[Any] = torch.arange(snake_case__ , device=encoder_input_tokens.device ) x += self.position_encoding(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = self.dropout_pre(snake_case__ ) # inverted the attention mask UpperCAmelCase__ : List[Any] = encoder_input_tokens.size() UpperCAmelCase__ : Tuple = self.get_extended_attention_mask(snake_case__ , snake_case__ ) for lyr in self.encoders: UpperCAmelCase__ : Any = lyr(snake_case__ , snake_case__ )[0] UpperCAmelCase__ : Any = self.layer_norm(snake_case__ ) return self.dropout_post(snake_case__ ), encoder_inputs_mask
438
0
from sklearn.metrics import fa_score import datasets lowercase_ = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' lowercase_ = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' lowercase_ = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): '''simple docstring''' def _snake_case ( self: str ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , ) def _snake_case ( self: Union[str, Any] , a: str , a: str , a: Tuple=None , a: Optional[int]=1 , a: List[str]="binary" , a: Tuple=None ): __lowerCamelCase : int = fa_score( a , a , labels=a , pos_label=a , average=a , sample_weight=a ) return {"f1": float(a ) if score.size == 1 else score}
230
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging lowercase_ = logging.get_logger(__name__) def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : Tuple = nn.functional.normalize(SCREAMING_SNAKE_CASE__ ) return torch.mm(SCREAMING_SNAKE_CASE__ , normalized_text_embeds.t() ) class A_ ( __UpperCamelCase ): '''simple docstring''' __snake_case = CLIPConfig __snake_case = ["""CLIPEncoderLayer"""] def __init__( self: List[Any] , a: CLIPConfig ): super().__init__(a ) __lowerCamelCase : List[str] = CLIPVisionModel(config.vision_config ) __lowerCamelCase : Union[str, Any] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=a ) __lowerCamelCase : Any = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=a ) __lowerCamelCase : List[str] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=a ) __lowerCamelCase : Any = nn.Parameter(torch.ones(17 ) , requires_grad=a ) __lowerCamelCase : Any = nn.Parameter(torch.ones(3 ) , requires_grad=a ) @torch.no_grad() def _snake_case ( self: Any , a: List[Any] , a: Union[str, Any] ): __lowerCamelCase : Optional[Any] = self.vision_model(a )[1] # pooled_output __lowerCamelCase : Dict = self.visual_projection(a ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __lowerCamelCase : int = cosine_distance(a , self.special_care_embeds ).cpu().float().numpy() __lowerCamelCase : Optional[int] = cosine_distance(a , self.concept_embeds ).cpu().float().numpy() __lowerCamelCase : List[str] = [] __lowerCamelCase : Tuple = image_embeds.shape[0] for i in range(a ): __lowerCamelCase : int = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images __lowerCamelCase : int = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): __lowerCamelCase : List[Any] = special_cos_dist[i][concept_idx] __lowerCamelCase : str = self.special_care_embeds_weights[concept_idx].item() __lowerCamelCase : Tuple = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} ) __lowerCamelCase : Optional[Any] = 0.0_1 for concept_idx in range(len(cos_dist[0] ) ): __lowerCamelCase : Optional[Any] = cos_dist[i][concept_idx] __lowerCamelCase : Union[str, Any] = self.concept_embeds_weights[concept_idx].item() __lowerCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(a ) result.append(a ) __lowerCamelCase : Tuple = [len(res['bad_concepts'] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def _snake_case ( self: str , a: torch.FloatTensor , a: torch.FloatTensor ): __lowerCamelCase : Optional[int] = self.vision_model(a )[1] # pooled_output __lowerCamelCase : str = self.visual_projection(a ) __lowerCamelCase : str = cosine_distance(a , self.special_care_embeds ) __lowerCamelCase : Dict = cosine_distance(a , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images __lowerCamelCase : 
List[str] = 0.0 __lowerCamelCase : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) __lowerCamelCase : int = torch.any(special_scores > 0 , dim=1 ) __lowerCamelCase : List[str] = special_care * 0.0_1 __lowerCamelCase : str = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) __lowerCamelCase : Dict = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) __lowerCamelCase : Optional[Any] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
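# A tiny numeric check of the `cosine_distance` helper above: rows are
# L2-normalized image embeddings, columns are normalized concept embeddings,
# so the result is a cosine-similarity matrix.
import torch
import torch.nn as nn

def cosine_distance(image_embeds, text_embeds):
    image_embeds = nn.functional.normalize(image_embeds)
    text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(image_embeds, text_embeds.t())

img = torch.tensor([[1.0, 0.0], [1.0, 1.0]])
txt = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
print(cosine_distance(img, txt))
# tensor([[1.0000, 0.0000],
#         [0.7071, 0.7071]])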
230
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a : List[str] = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Any = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
640
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
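# A worked check, assuming solve() from the listing above is in scope.
# "5 6 9 * +": push 5, push 6, push 9; "*" pops 9 and 6 and pushes 54;
# "+" pops 54 and 5 and pushes 59.
print(solve("5 6 9 * +".split(" ")))  # -> 59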
640
1
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
592
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( 'stable diffusion controlnet', '0.22.0', 'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.', standard_warn=False, stacklevel=3, )
592
1
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCamelCase = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
146
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowercase : '''simple docstring''' __SCREAMING_SNAKE_CASE = BlenderbotSmallConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = """gelu""" def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = seq_length UpperCAmelCase__ = is_training UpperCAmelCase__ = use_labels UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = eos_token_id UpperCAmelCase__ = pad_token_id UpperCAmelCase__ = bos_token_id def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase__ = prepare_blenderbot_small_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ (self , __a , __a ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFBlenderbotSmallModel(config=__a ).get_decoder() UpperCAmelCase__ = inputs_dict['input_ids'] UpperCAmelCase__ = input_ids[:1, :] UpperCAmelCase__ = inputs_dict['attention_mask'][:1, :] UpperCAmelCase__ = inputs_dict['head_mask'] UpperCAmelCase__ = 1 # first forward pass UpperCAmelCase__ = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) UpperCAmelCase__ , UpperCAmelCase__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase__ = tf.concat([attention_mask, next_attn_mask] , 
axis=-1 ) UpperCAmelCase__ = model(__a , attention_mask=__a )[0] UpperCAmelCase__ = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1E-3 ) def UpperCamelCase_( snake_case__: Any , snake_case__: List[str] , snake_case__: Dict , snake_case__: Any=None , snake_case__: int=None , snake_case__: int=None , snake_case__: int=None , snake_case__: Optional[int]=None , ) -> int: if attention_mask is None: UpperCAmelCase__ = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __SCREAMING_SNAKE_CASE = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = ( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = TFBlenderbotSmallModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__a ) def UpperCamelCase__ (self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) @require_tokenizers @require_tf class lowercase ( unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] __SCREAMING_SNAKE_CASE = """facebook/blenderbot_small-90M""" @cached_property def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) @cached_property def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ = self.tokenizer(self.src_text , return_tensors='tf' ) UpperCAmelCase__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , ) UpperCAmelCase__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
146
1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")

        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
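# Reference re-implementations of the two GELU variants the test above
# compares: the exact erf-based form and the tanh approximation. These mirror
# the formulas in transformers.activations and agree only approximately,
# which is what the allclose/assertFalse pair checks.
import math

import torch

def gelu_exact(x: torch.Tensor) -> torch.Tensor:
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_tanh(x: torch.Tensor) -> torch.Tensor:
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

x = torch.tensor([-1.0, 0.0, 1.0])
print(gelu_exact(x))
print(gelu_tanh(x))  # close to, but not identical with, the exact values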
718
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
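# A minimal instantiation sketch for the config above, assuming the class is
# importable; "resnet50" is only a plausible timm identifier, any timm
# backbone name would do.
config = TimmBackboneConfig(
    backbone="resnet50",
    num_channels=3,
    features_only=True,
    use_pretrained_backbone=False,
    out_indices=(1, 2, 3, 4),
)
print(config.backbone, config.out_indices)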
526
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __UpperCamelCase : Optional[Any] = { '''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FalconForCausalLM''', '''FalconModel''', '''FalconPreTrainedModel''', '''FalconForSequenceClassification''', '''FalconForTokenClassification''', '''FalconForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args) -> None:
    """Convert a GPTSAN Mesh-TensorFlow checkpoint into a PyTorch state dict."""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue  # skip optimizer slot variables
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enters an nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]  # "wi" or "wo"
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = vnp[i].transpose([1, 0]).copy()  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # fused QKV kernel; compute the same dimensions as Mesh-TensorFlow's einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )
                    new_state["model.blocks.%d.self_attn.self_attn.q_proj.weight" % player] = torch.tensor(state_q)
                    new_state["model.blocks.%d.self_attn.self_attn.k_proj.weight" % player] = torch.tensor(state_k)
                    new_state["model.blocks.%d.self_attn.self_attn.v_proj.weight" % player] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # embeddings keep their layout
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"  # tied output head
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy().reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
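# A minimal invocation sketch for the converter above (paths are illustrative,
# not real checkpoints); the script appends ".pt" when the suffix is missing:
#
#   python convert_tf_gptsan_to_pt.py \
#       --tf_model_dir /path/to/gptsan_tf_checkpoint \
#       --output gptsan_pytorch
#
# which writes the converted state dict to "gptsan_pytorch.pt".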
612
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Tuple = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Dict = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Dict = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
509
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Optional[int] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } SCREAMING_SNAKE_CASE__ : Dict = { "camembert-base": 512, } SCREAMING_SNAKE_CASE__ : int = "▁" class A_ ( _UpperCAmelCase ): """simple docstring""" lowercase : Optional[Any] = VOCAB_FILES_NAMES lowercase : int = PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] = ["input_ids", "attention_mask"] lowercase : Union[str, Any] = CamembertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **__UpperCAmelCase , ) -> Dict: # Mask token behave like a normal word, i.e. include the space before it a : List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) a : Optional[int] = vocab_file a : Union[str, Any] = False if not self.vocab_file else True def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a : Optional[Any] = [self.cls_token_id] a : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: a : Tuple = [self.sep_token_id] a : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(__UpperCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return a : Optional[Any] = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
509
1
'''simple docstring'''

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_small_integration_test(self):
        """simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
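# Why the expected value is negative: model(...).loss is the mean cross-entropy
# per target token, so -(labels.shape[-1] * loss) recovers the sequence
# log-likelihood log p(labels | input). A self-contained sketch with made-up
# numbers (the real per-token loss comes from the checkpoint):
mean_nll_per_token = 9.4347  # hypothetical value of loss.item()
num_target_tokens = 9        # hypothetical value of labels.shape[-1]
sequence_log_likelihood = -(num_target_tokens * mean_nll_per_token)
print(sequence_log_likelihood)  # about -84.9, the order of magnitude asserted above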
316
'''simple docstring'''

import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
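# Round-trip sketch outside pytest (the table name "dataset" and the sqlite URI
# format mirror the tests above; the file path is illustrative and sqlalchemy
# must be installed):
#
#   from datasets import Dataset
#   from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
#   SqlDatasetWriter(ds, "dataset", "sqlite:///roundtrip.db", num_proc=1).write()
#   restored = SqlDatasetReader("dataset", "sqlite:///roundtrip.db").read()
#   assert restored.column_names == ds.column_names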
316
1
"""simple docstring""" import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __lowerCAmelCase : '''simple docstring''' @property def __UpperCAmelCase ( self ): return self.get_dummy_input() @property def __UpperCAmelCase ( self ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def __UpperCAmelCase ( self , _a=True , _a=False , _a=False , _a=False , ): __a = 4 __a = 32 __a = (32, 32) __a = torch.manual_seed(0 ) __a = torch.device(_a ) __a = (batch_size, num_channels) + sizes __a = randn_tensor(_a , generator=_a , device=_a ) __a = {'''hidden_states''': hidden_states} if include_temb: __a = 128 __a = randn_tensor((batch_size, temb_channels) , generator=_a , device=_a ) if include_res_hidden_states_tuple: __a = torch.manual_seed(1 ) __a = (randn_tensor(_a , generator=_a , device=_a ),) if include_encoder_hidden_states: __a = floats_tensor((batch_size, 32, 32) ).to(_a ) if include_skip_sample: __a = randn_tensor(((batch_size, 3) + sizes) , generator=_a , device=_a ) return dummy_input def __UpperCAmelCase ( self ): __a = { '''in_channels''': 32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": __a = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) __a = self.dummy_input return init_dict, inputs_dict def __UpperCAmelCase ( self , _a ): __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.block_class(**_a ) unet_block.to(_a ) unet_block.eval() with torch.no_grad(): __a = unet_block(**_a ) if isinstance(_a , _a ): __a = output[0] self.assertEqual(output.shape , self.output_shape ) __a = output[0, -1, -3:, -3:] __a = torch.tensor(_a ).to(_a ) assert torch_all_close(output_slice.flatten() , _a , atol=5E-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def __UpperCAmelCase ( self ): __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.block_class(**_a ) model.to(_a ) model.train() __a = model(**_a ) if isinstance(_a , _a ): __a = output[0] __a = torch.device(_a ) __a = randn_tensor(output.shape , device=_a ) __a = torch.nn.functional.mse_loss(_a , _a ) loss.backward()
65
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowercase_ = abspath(join(dirname(dirname(dirname(__file__))), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def lowercase ( lowerCAmelCase__ : List[Any] ) -> str: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(lowerCAmelCase__ ) def lowercase ( lowerCAmelCase__ : int ) -> Union[str, Any]: from transformers.testing_utils import pytest_terminal_summary_main __a = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(lowerCAmelCase__ , id=lowerCAmelCase__ )
65
1
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
73
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase__ ( snake_case__ ): @staticmethod @abstractmethod def UpperCAmelCase__ ( snake_case__ : ArgumentParser ): raise NotImplementedError() @abstractmethod def UpperCAmelCase__ ( self : Union[str, Any] ): raise NotImplementedError()
153
0
'''simple docstring'''

from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    '''simple docstring'''
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    '''simple docstring'''
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    '''simple docstring'''
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
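# Worked example: for n = 4 the search above finds exactly two solutions
# (queens in columns 1,3,0,2 and the mirror 2,0,3,1). Re-running on a fresh
# 4x4 board checks the count:
#
#   solution.clear()
#   solve([[0] * 4 for _ in range(4)], 0)
#   assert len(solution) == 2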
343
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
343
1
'''simple docstring'''

from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig(object):
    '''simple docstring'''

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        """simple docstring"""
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
578
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
578
1
'''simple docstring'''

from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
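# Usage sketch with a classic instance (values/weights are illustrative). The
# greedy-by-ratio rule is optimal for the *fractional* problem solved here,
# unlike the 0/1 variant:
best, fractions_taken = fractional_knapsack([60, 100, 120], [10, 20, 30], capacity=50)
print(best)            # 240.0: items 0 and 1 taken whole, two thirds of item 2
print(fractions_taken)  # [1, 1, 0.666...]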
719
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : int = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : int = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): 
'''simple docstring''' __lowercase : Union[str, Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : str = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : int = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : str = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls ,["""torch"""] ) def _SCREAMING_SNAKE_CASE ( *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(UpperCamelCase , ["""torch"""] ) def _SCREAMING_SNAKE_CASE ( *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(UpperCamelCase , ["""torch"""] ) def _SCREAMING_SNAKE_CASE ( *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(UpperCamelCase , ["""torch"""] ) def _SCREAMING_SNAKE_CASE ( *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(UpperCamelCase , ["""torch"""] ) def _SCREAMING_SNAKE_CASE ( *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(UpperCamelCase , ["""torch"""] ) def _SCREAMING_SNAKE_CASE ( *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(UpperCamelCase , ["""torch"""] ) def _SCREAMING_SNAKE_CASE ( *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(UpperCamelCase , ["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : str = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = ['''torch'''] def __init__( self 
,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : str = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : str = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[int] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[int] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Any = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: 
requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Any = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[int] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls 
,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Tuple = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[int] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : str = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: 
requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Tuple = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : int = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : str = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) @classmethod def 
UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[int] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Any = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int: requires_backends(cls ,["""torch"""] ) class 
lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(cls ,["""torch"""] ) class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = ['''torch'''] def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: requires_backends(self ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: requires_backends(cls ,["""torch"""] ) @classmethod def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]: requires_backends(cls ,["""torch"""] )
160
0
'''simple docstring'''

import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
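# Usage sketch: MGP-STR tokenizes character by character, so "hello" maps to
# one id per letter ("[GO]" doubles as unk/bos/pad). Requires the public
# checkpoint to be reachable:
#
#   tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tok("hello")["input_ids"]
#   tok.convert_ids_to_tokens(ids)  # ['h', 'e', 'l', 'l', 'o']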
422
'''simple docstring'''

import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
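# Worked example: with embedding_dim=4 there are two timescales, so a single
# timestep t embeds (sin first, by default) as
# [sin(t), sin(t / 1e4), cos(t), cos(t / 1e4)]:
example_emb = get_sinusoidal_embeddings(jnp.array([1.0]), embedding_dim=4)
print(example_emb.shape)  # (1, 4)
print(example_emb)        # approximately [sin 1, sin 1e-4, cos 1, cos 1e-4]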
422
1
"""simple docstring""" from functools import lru_cache @lru_cache def _snake_case ( lowercase__ : int ) -> int: '''simple docstring''' if num < 0: raise ValueError("""Number should not be negative.""" ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
719
"""simple docstring""" import math def _snake_case ( ) -> None: '''simple docstring''' lowerCAmelCase_ :List[str] = input("""Enter message: """ ) lowerCAmelCase_ :Any = int(input(f"""Enter key [2-{len(lowercase__ ) - 1}]: """ ) ) lowerCAmelCase_ :str = input("""Encryption/Decryption [e/d]: """ ) if mode.lower().startswith("""e""" ): lowerCAmelCase_ :int = encrypt_message(lowercase__ , lowercase__ ) elif mode.lower().startswith("""d""" ): lowerCAmelCase_ :List[str] = decrypt_message(lowercase__ , lowercase__ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f"""Output:\n{text + "|"}""" ) def _snake_case ( lowercase__ : int , lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :int = [""""""] * key for col in range(lowercase__ ): lowerCAmelCase_ :str = col while pointer < len(lowercase__ ): cipher_text[col] += message[pointer] pointer += key return "".join(lowercase__ ) def _snake_case ( lowercase__ : int , lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = math.ceil(len(lowercase__ ) / key ) lowerCAmelCase_ :int = key lowerCAmelCase_ :Tuple = (num_cols * num_rows) - len(lowercase__ ) lowerCAmelCase_ :Any = [""""""] * num_cols lowerCAmelCase_ :Tuple = 0 lowerCAmelCase_ :Any = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): lowerCAmelCase_ :List[Any] = 0 row += 1 return "".join(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
256
0
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that records the delta and peak GPU memory used inside the block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
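# A minimal, self-contained sketch of the measurement pattern used above. It is an
# illustrative addition, not part of the original script: it can be called on its
# own when a CUDA device is available, and the tensor size is arbitrary.
def memory_tracking_demo() -> None:
    if not torch.cuda.is_available():
        print("No CUDA device; nothing to measure.")
        return
    with TorchTracemalloc() as tm:
        x = torch.empty(256, 1024, 1024, device="cuda")  # ~1 GiB of float32
        del x
    # `used` is the allocation delta after the block; `peaked` is the peak delta inside it.
    print(f"used {tm.used} MB, peaked {tm.peaked} MB inside the block")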
24
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class UpperCamelCase( unittest.TestCase ): snake_case_ : int = MODEL_FOR_MASKED_LM_MAPPING snake_case_ : Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" ) __snake_case = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [ {"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 3_8_0_1_5, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 2_5_5_0_6, "token_str": " accuser"}, ] , ) __snake_case = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [ { "sequence": "The largest city in France is grouped", "score": 2.1e-0_5, "token": 3_8_0_1_5, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-0_5, "token": 2_5_5_0_6, "token_str": " accuser", }, ] , ) __snake_case = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [ {"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3_6_0_6, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3_4_9_9, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2_9_4_1, "token_str": " Te"}, ] , ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]: '''simple docstring''' __snake_case = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" ) __snake_case = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [ {"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 3_5_6_7_6, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS"}, ] , ) __snake_case = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [ { "sequence": "The largest city in France is Maul", "score": 2.2e-0_5, "token": 3_5_6_7_6, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS"}, ] , ) __snake_case = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [ {"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3_4_9_9, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-0_5, "token": 2_9_4_1, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3_6_0_6, "token_str": " Clara"}, ] , ) __snake_case = 
unmasker("My name is <mask> <mask>" , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [ [ { "score": 2.2e-0_5, "token": 3_5_6_7_6, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-0_5, "token": 3_5_6_7_6, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ] , ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" ) # convert model to fp16 pipe.model.half() __snake_case = pipe("Paris is the [MASK] of France." ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow @require_torch def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple: '''simple docstring''' __snake_case = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" ) self.run_large_test(SCREAMING_SNAKE_CASE ) @slow @require_tf def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int: '''simple docstring''' __snake_case = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" ) self.run_large_test(SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] ) -> str: '''simple docstring''' __snake_case = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [ {"sequence": "My name is John", "score": 0.008, "token": 6_1_0, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1_5_7_3, "token_str": " Chris"}, ] , ) __snake_case = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2_2_0_1, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 1_2_7_9_0, "token_str": " Lyon", }, ] , ) __snake_case = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3_4_9_9, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 1_3_6_0_6, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2_9_4_1, "token_str": " Te"}, ] , ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str: '''simple docstring''' __snake_case = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" ) __snake_case = None __snake_case = None self.run_pipeline_test(SCREAMING_SNAKE_CASE , [] ) @require_tf def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' __snake_case = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" ) __snake_case = None __snake_case = None self.run_pipeline_test(SCREAMING_SNAKE_CASE , [] ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ) -> int: '''simple docstring''' 
if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" ) __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) __snake_case = [ f'''This is another {tokenizer.mask_token} test''', ] return fill_masker, examples def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: '''simple docstring''' __snake_case = fill_masker.tokenizer __snake_case = fill_masker.model __snake_case = fill_masker( f'''This is a {tokenizer.mask_token}''' , ) self.assertEqual( SCREAMING_SNAKE_CASE , [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ] , ) __snake_case = fill_masker([f'''This is a {tokenizer.mask_token}'''] ) self.assertEqual( SCREAMING_SNAKE_CASE , [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ] , ) __snake_case = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] ) self.assertEqual( SCREAMING_SNAKE_CASE , [ [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ], [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE 
), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ], ] , ) with self.assertRaises(SCREAMING_SNAKE_CASE ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(SCREAMING_SNAKE_CASE ): fill_masker("This is" ) self.run_test_top_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.run_test_targets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.run_test_top_k_targets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.fill_mask_with_duplicate_targets_and_top_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.fill_mask_with_multiple_masks(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any: '''simple docstring''' __snake_case = tokenizer.get_vocab() __snake_case = sorted(vocab.keys() )[:2] # Pipeline argument __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , targets=SCREAMING_SNAKE_CASE ) __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' ) self.assertEqual( SCREAMING_SNAKE_CASE , [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ] , ) __snake_case = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , SCREAMING_SNAKE_CASE ) __snake_case = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(SCREAMING_SNAKE_CASE ) ) # Call argument __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=SCREAMING_SNAKE_CASE ) self.assertEqual( SCREAMING_SNAKE_CASE , [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ] , ) __snake_case = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , SCREAMING_SNAKE_CASE ) __snake_case = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(SCREAMING_SNAKE_CASE ) ) # Score equivalence __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=SCREAMING_SNAKE_CASE ) __snake_case = [top_mask["token_str"] for top_mask in outputs] __snake_case = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(SCREAMING_SNAKE_CASE ) == set(SCREAMING_SNAKE_CASE ): __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=SCREAMING_SNAKE_CASE ) __snake_case = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , nested_simplify(SCREAMING_SNAKE_CASE ) ) # Raises with invalid with self.assertRaises(SCREAMING_SNAKE_CASE ): __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(SCREAMING_SNAKE_CASE ): __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""] ) with self.assertRaises(SCREAMING_SNAKE_CASE ): __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]: '''simple docstring''' __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , top_k=2 ) __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' ) self.assertEqual( SCREAMING_SNAKE_CASE , [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ] , ) __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( SCREAMING_SNAKE_CASE , [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ] , ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , nested_simplify(SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: '''simple docstring''' __snake_case = tokenizer.get_vocab() __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) # top_k=2, ntargets=3 __snake_case = sorted(vocab.keys() )[:3] __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=SCREAMING_SNAKE_CASE ) # If we use the most probably targets, and filter differently, we should still # have the same results __snake_case = [el["token_str"] for el in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(SCREAMING_SNAKE_CASE ).issubset(SCREAMING_SNAKE_CASE ): __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=SCREAMING_SNAKE_CASE ) # They should yield exactly the same result self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , nested_simplify(SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: '''simple docstring''' __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) __snake_case = tokenizer.get_vocab() # String duplicates + id duplicates __snake_case = sorted(vocab.keys() )[:3] __snake_case = [targets[0], targets[1], targets[0], targets[2], targets[1]] __snake_case = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=SCREAMING_SNAKE_CASE , top_k=1_0 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 3 ) def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: '''simple docstring''' __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) __snake_case = fill_masker( f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( SCREAMING_SNAKE_CASE , [ [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ], [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ], [ {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )}, ], ] , )
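# A minimal usage sketch of the pipeline exercised by the tests above (an illustrative
# addition; it assumes `transformers` and a backend such as PyTorch are installed, and
# the scores/tokens printed by a real run depend on the checkpoint):
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    for prediction in unmasker("My name is <mask>"):
        print(prediction["token_str"], prediction["score"])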
371
0
import os
from typing import List, Optional

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
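# A brief usage sketch (an illustrative addition; it assumes `transformers` is
# installed, the tiny vocabulary below is hypothetical rather than the real ESM
# alphabet, and this module's package-relative imports mean you would normally run
# the snippet from your own script):
if __name__ == "__main__":
    import tempfile

    from transformers import EsmTokenizer

    with tempfile.TemporaryDirectory() as tmp:
        vocab_path = os.path.join(tmp, "vocab.txt")
        with open(vocab_path, "w") as f:
            f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "A", "C", "G", "<mask>"]))
        tok = EsmTokenizer(vocab_path)
        print(tok("A C G").input_ids)  # cls/eos ids wrap the three residue ids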
719
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig

ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
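# A brief usage sketch (an illustrative addition; it assumes `transformers` is
# installed, and should be run from your own script since this module uses
# package-relative imports):
if __name__ == "__main__":
    from transformers import AlbertConfig

    config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
    print(config.hidden_size, config.num_hidden_groups)  # 768 1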
526
0
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black __A : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. __A : str = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class __snake_case ( unittest.TestCase): """simple docstring""" def __lowercase ( self : List[Any] ) -> Optional[Any]: lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) ) lowerCAmelCase_ : List[Any] = self.transformer_dir shutil.copy( os.path.join(snake_case__ , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , ) def __lowercase ( self : Any ) -> Optional[int]: lowerCAmelCase_ : Optional[int] = "src/transformers" shutil.rmtree(self.transformer_dir ) def __lowercase ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int]=None ) -> Tuple: lowerCAmelCase_ : List[str] = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: lowerCAmelCase_ : int = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result lowerCAmelCase_ : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) lowerCAmelCase_ : List[Any] = black.format_str(snake_case__ , mode=snake_case__ ) lowerCAmelCase_ : List[str] = os.path.join(self.transformer_dir , """new_code.py""" ) with open(snake_case__ , """w""" , newline="""\n""" ) as f: f.write(snake_case__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=snake_case__ ) with open(snake_case__ , """r""" ) as f: self.assertTrue(f.read() , snake_case__ ) def __lowercase ( self : str ) -> Optional[int]: lowerCAmelCase_ : int = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" ) self.assertEqual(snake_case__ , snake_case__ ) def __lowercase ( self : Any ) -> List[Any]: self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , ) # With no empty line at the end self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , snake_case__ , ) # Copy consistency with rename self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" 
, snake_case__ ) , ) # Copy consistency with a really long name lowerCAmelCase_ : Optional[int] = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , snake_case__ , snake_case__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , snake_case__ , overwrite_result=re.sub("""Bert""" , """TestModel""" , snake_case__ ) , ) def __lowercase ( self : Dict ) -> str: lowerCAmelCase_ : Optional[Any] = check_copies.LOCALIZED_READMES["README_zh-hans.md"] lowerCAmelCase_ : int = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace)," " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**" " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders" " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang" " Luong, Quoc V. Le, Christopher D. Manning." ) lowerCAmelCase_ : Optional[Any] = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) lowerCAmelCase_ : int = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1." 
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文" " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自" " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather" " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le," " Christopher D. Manning 发布。\n" ) lowerCAmelCase_ : Dict = check_copies.convert_to_localized_md( snake_case__ , snake_case__ , localized_readme["""format_model_list"""] ) self.assertFalse(snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) lowerCAmelCase_ : Optional[Any] = check_copies.convert_to_localized_md( snake_case__ , snake_case__ , localized_readme["""format_model_list"""] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(snake_case__ ) lowerCAmelCase_ : Union[str, Any] = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut." ) lowerCAmelCase_ : List[Any] = ( "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and" " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) lowerCAmelCase_ : str = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) lowerCAmelCase_ : Dict = check_copies.convert_to_localized_md( snake_case__ , snake_case__ , localized_readme["""format_model_list"""] ) # Check if the model link is synchronized. self.assertEqual(snake_case__ , snake_case__ )
275
"""simple docstring""" from __future__ import annotations def _lowerCAmelCase ( lowerCamelCase__ : list[list[int]] ) -> bool: _SCREAMING_SNAKE_CASE : int = len(lowerCamelCase__ ) # We need to create solution object to save path. _SCREAMING_SNAKE_CASE : Dict = [[0 for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )] _SCREAMING_SNAKE_CASE : str = run_maze(lowerCamelCase__, 0, 0, lowerCamelCase__ ) if solved: print("\n".join(str(lowerCamelCase__ ) for row in solutions ) ) else: print("No solution exists!" ) return solved def _lowerCAmelCase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : list[list[int]] ) -> bool: _SCREAMING_SNAKE_CASE : Tuple = len(lowerCamelCase__ ) # Final check point. if i == j == (size - 1): _SCREAMING_SNAKE_CASE : List[str] = 1 return True _SCREAMING_SNAKE_CASE : Tuple = (not i < 0) and (not j < 0) # Check lower bounds _SCREAMING_SNAKE_CASE : str = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. _SCREAMING_SNAKE_CASE : Optional[int] = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited _SCREAMING_SNAKE_CASE : List[Any] = 1 # check for directions if ( run_maze(lowerCamelCase__, i + 1, lowerCamelCase__, lowerCamelCase__ ) or run_maze(lowerCamelCase__, lowerCamelCase__, j + 1, lowerCamelCase__ ) or run_maze(lowerCamelCase__, i - 1, lowerCamelCase__, lowerCamelCase__ ) or run_maze(lowerCamelCase__, lowerCamelCase__, j - 1, lowerCamelCase__ ) ): return True _SCREAMING_SNAKE_CASE : Optional[Any] = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
572
0
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """Find x such that x = r_1 (mod n_1) and x = r_2 (mod n_2), via extended Euclid."""
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """Same result as chinese_remainder_theorem, but using modular inverses directly."""
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
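# An added worked example: x = 6 is the unique solution modulo 35 of
# x % 5 == 1 and x % 7 == 6, so both implementations should print 6.
if __name__ == "__main__":
    print(chinese_remainder_theorem(5, 1, 7, 6))   # 6
    print(chinese_remainder_theorem2(5, 1, 7, 6))  # 6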
712
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency list) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
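# Illustrative usage on two small adjacency-list graphs (an added example; node
# ids are arbitrary):
if __name__ == "__main__":
    acyclic = {0: [1, 2], 1: [2], 2: []}
    cyclic = {0: [1], 1: [2], 2: [0]}
    print(check_cycle(acyclic))  # False
    print(check_cycle(cyclic))   # True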
58
0
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset

POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
267
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return the product u(u-1)(u-2)...(u-(p-1)) used by Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
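# A non-interactive sketch of the same computation, added for illustration and
# callable instead of the interactive main() above. The values are made up:
# f(x) = x^2 sampled at x = 0, 1, 2, 3, interpolated at x = 1.5, which gives 2.25.
def demo() -> None:
    xs = [0, 1, 2, 3]
    ys = [0.0, 1.0, 4.0, 9.0]
    n = len(xs)
    table = [[0.0] * n for _ in range(n)]
    for i, fx in enumerate(ys):
        table[i][0] = fx
    # forward difference table, column by column
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (1.5 - xs[0]) / (xs[1] - xs[0])
    est = table[0][0] + sum(ucal(u, i) * table[0][i] / math.factorial(i) for i in range(1, n))
    print(est)  # 2.25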
267
1
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale PIL image, using the mean pixel value as the threshold."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
720
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __lowerCamelCase : Optional[int] = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ __lowerCamelCase : str = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ __lowerCamelCase : str = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ): snake_case__ : List[Any] = compute_mauve( p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , ) return out
25
0
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
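# A brief usage sketch (an added example; it assumes `transformers`, `torch`, and
# `Pillow` are installed, that "facebook/flava-full" is reachable on the Hub, and
# it should be run from your own script since this module uses package-relative
# imports):
if __name__ == "__main__":
    from PIL import Image
    from transformers import FlavaProcessor

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    inputs = processor(images=Image.new("RGB", (224, 224)), text=["a photo"], return_tensors="pt")
    print(sorted(inputs.keys()))  # e.g. attention_mask, input_ids, pixel_values, ...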
60
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the digit string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
291
0
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
708
from torch import nn


class ClassificationHead(nn.Module):
    """A single-layer classification head mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
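# Illustrative forward pass with random inputs (an added example; the sizes are
# arbitrary):
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=32)
    logits = head(torch.randn(4, 32))
    print(logits.shape)  # torch.Size([4, 5])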
408
0