Dataset schema (column name, type, observed range):

    code                     string   lengths 81 .. 54k
    code_codestyle           int64    0 .. 721
    style_context            string   lengths 91 .. 41.9k
    style_context_codestyle  int64    0 .. 699
    label                    int64    0 .. 1

Each row below lists its fields in that order.
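Since this preview is only a flattened dump, here is a minimal sketch of how a dataset with the schema above could be loaded and inspected with the Hugging Face datasets library. The dataset path "user/code-style-pairs" and the split name are hypothetical placeholders; no dataset name appears anywhere in this preview.

    # Minimal sketch: load a dataset with the schema above and inspect one row.
    # "user/code-style-pairs" is a hypothetical placeholder path, not a real name.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")  # assumed split name

    row = ds[0]
    print(len(row["code"]))                # string column, lengths 81 .. 54k
    print(row["code_codestyle"])           # int64 style id, 0 .. 721
    print(len(row["style_context"]))       # string column, lengths 91 .. 41.9k
    print(row["style_context_codestyle"])  # int64 style id, 0 .. 699
    print(row["label"])                    # int64 binary label, 0 or 1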
code:

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

__UpperCAmelCase : Tuple = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase : Union[str, Any] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    __UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

code_codestyle: 718
style_context:

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer

__UpperCAmelCase : Dict = logging.get_logger(__name__)

__UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

__UpperCAmelCase : Dict = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

__UpperCAmelCase : Tuple = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

__UpperCAmelCase : Any = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class UpperCAmelCase_(_a):
    '''simple docstring'''

    __UpperCamelCase : str = VOCAB_FILES_NAMES
    __UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION
    __UpperCamelCase : Any = RoFormerTokenizer

    def __init__(
        self,
        __SCREAMING_SNAKE_CASE=None,
        __SCREAMING_SNAKE_CASE=None,
        __SCREAMING_SNAKE_CASE=True,
        __SCREAMING_SNAKE_CASE="[UNK]",
        __SCREAMING_SNAKE_CASE="[SEP]",
        __SCREAMING_SNAKE_CASE="[PAD]",
        __SCREAMING_SNAKE_CASE="[CLS]",
        __SCREAMING_SNAKE_CASE="[MASK]",
        __SCREAMING_SNAKE_CASE=True,
        __SCREAMING_SNAKE_CASE=None,
        **__SCREAMING_SNAKE_CASE,
    ):
        """simple docstring"""
        super().__init__(
            __SCREAMING_SNAKE_CASE,
            tokenizer_file=__SCREAMING_SNAKE_CASE,
            do_lower_case=__SCREAMING_SNAKE_CASE,
            unk_token=__SCREAMING_SNAKE_CASE,
            sep_token=__SCREAMING_SNAKE_CASE,
            pad_token=__SCREAMING_SNAKE_CASE,
            cls_token=__SCREAMING_SNAKE_CASE,
            mask_token=__SCREAMING_SNAKE_CASE,
            tokenize_chinese_chars=__SCREAMING_SNAKE_CASE,
            strip_accents=__SCREAMING_SNAKE_CASE,
            **__SCREAMING_SNAKE_CASE,
        )

        UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get('''lowercase''', __SCREAMING_SNAKE_CASE) != do_lower_case
            or pre_tok_state.get('''strip_accents''', __SCREAMING_SNAKE_CASE) != strip_accents
        ):
            UpperCamelCase : List[Any] = getattr(__SCREAMING_SNAKE_CASE, pre_tok_state.pop('''type'''))
            UpperCamelCase : Optional[int] = do_lower_case
            UpperCamelCase : Optional[Any] = strip_accents
            UpperCamelCase : List[Any] = pre_tok_class(**__SCREAMING_SNAKE_CASE)

        UpperCamelCase : List[Any] = do_lower_case

    def __getstate__(self):
        """simple docstring"""
        UpperCamelCase : Optional[int] = self.__dict__.copy()
        UpperCamelCase : Any = BertPreTokenizer()
        return state

    def __setstate__(self, __SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : Any = d
        UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
        UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE))

    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None):
        """simple docstring"""
        UpperCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None):
        """simple docstring"""
        UpperCamelCase : Dict = [self.sep_token_id]
        UpperCamelCase : int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None):
        """simple docstring"""
        UpperCamelCase : List[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE, name=__SCREAMING_SNAKE_CASE)
        return tuple(__SCREAMING_SNAKE_CASE)

    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None, __SCREAMING_SNAKE_CASE=None, __SCREAMING_SNAKE_CASE=False, **__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : Any = BertPreTokenizer()
        return super().save_pretrained(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE)

style_context_codestyle: 643
label: 0
code:

import random


def a(SCREAMING_SNAKE_CASE_: int, SCREAMING_SNAKE_CASE_: float, SCREAMING_SNAKE_CASE_: bool = False):
    """simple docstring"""
    UpperCamelCase : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE_)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(SCREAMING_SNAKE_CASE_)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(SCREAMING_SNAKE_CASE_):
        for j in range(i + 1, SCREAMING_SNAKE_CASE_):
            if random.random() < probability:
                graph[i].append(SCREAMING_SNAKE_CASE_)
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(SCREAMING_SNAKE_CASE_)
    return graph


def a(SCREAMING_SNAKE_CASE_: int):
    """simple docstring"""
    return {i: [j for j in range(SCREAMING_SNAKE_CASE_) if i != j] for i in range(SCREAMING_SNAKE_CASE_)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()

code_codestyle: 719
style_context:

from __future__ import annotations


def a(SCREAMING_SNAKE_CASE_: list[int]):
    """simple docstring"""
    if len(SCREAMING_SNAKE_CASE_) == 0:
        return array
    UpperCamelCase, UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_), max(SCREAMING_SNAKE_CASE_)
    # Compute the variables
    UpperCamelCase : Union[str, Any] = _max - _min + 1
    UpperCamelCase, UpperCamelCase : Optional[Any] = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        UpperCamelCase : Optional[int] = i - _min
        UpperCamelCase : Any = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    UpperCamelCase : str = 0
    for i in range(SCREAMING_SNAKE_CASE_):
        while holes_repeat[i] > 0:
            UpperCamelCase : List[Any] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    __UpperCAmelCase : Any = input("Enter numbers separated by comma:\n")
    __UpperCAmelCase : int = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))

style_context_codestyle: 643
label: 0
code:

import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase_(_a):
    '''simple docstring'''

    __UpperCamelCase : Union[str, Any] = (IPNDMScheduler,)
    __UpperCamelCase : str = (("num_inference_steps", 50),)

    def _lowercase(self, **__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : Optional[Any] = {'''num_train_timesteps''': 1_000}
        config.update(**__SCREAMING_SNAKE_CASE)
        return config

    def _lowercase(self, __SCREAMING_SNAKE_CASE=0, **__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : Optional[int] = dict(self.forward_default_kwargs)
        UpperCamelCase : int = kwargs.pop('''num_inference_steps''', __SCREAMING_SNAKE_CASE)
        UpperCamelCase : int = self.dummy_sample
        UpperCamelCase : Dict = 0.1 * sample
        UpperCamelCase : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            UpperCamelCase : List[Any] = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE)
            UpperCamelCase : int = scheduler_class(**__SCREAMING_SNAKE_CASE)
            scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
            # copy over dummy past residuals
            UpperCamelCase : Optional[int] = dummy_past_residuals[:]
            if time_step is None:
                UpperCamelCase : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__SCREAMING_SNAKE_CASE)
                UpperCamelCase : Optional[Any] = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE)
                new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
                # copy over dummy past residuals
                UpperCamelCase : List[str] = dummy_past_residuals[:]
            UpperCamelCase : List[Any] = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            UpperCamelCase : Optional[int] = new_scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            UpperCamelCase : int = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            UpperCamelCase : int = new_scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def _lowercase(self):
        """simple docstring"""
        pass

    def _lowercase(self, __SCREAMING_SNAKE_CASE=0, **__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : List[Any] = dict(self.forward_default_kwargs)
        UpperCamelCase : Optional[Any] = kwargs.pop('''num_inference_steps''', __SCREAMING_SNAKE_CASE)
        UpperCamelCase : int = self.dummy_sample
        UpperCamelCase : Union[str, Any] = 0.1 * sample
        UpperCamelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            UpperCamelCase : Any = self.get_scheduler_config()
            UpperCamelCase : Any = scheduler_class(**__SCREAMING_SNAKE_CASE)
            scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
            # copy over dummy past residuals (must be after setting timesteps)
            UpperCamelCase : str = dummy_past_residuals[:]
            if time_step is None:
                UpperCamelCase : List[Any] = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__SCREAMING_SNAKE_CASE)
                UpperCamelCase : str = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE)
                new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
                # copy over dummy past residual (must be after setting timesteps)
                UpperCamelCase : Tuple = dummy_past_residuals[:]
            UpperCamelCase : List[Any] = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            UpperCamelCase : Dict = new_scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            UpperCamelCase : Tuple = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            UpperCamelCase : str = new_scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def _lowercase(self, **__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : List[str] = self.scheduler_classes[0]
        UpperCamelCase : Any = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE)
        UpperCamelCase : Optional[int] = scheduler_class(**__SCREAMING_SNAKE_CASE)
        UpperCamelCase : List[Any] = 10
        UpperCamelCase : Optional[Any] = self.dummy_model()
        UpperCamelCase : Dict = self.dummy_sample_deter
        scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
        for i, t in enumerate(scheduler.timesteps):
            UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
            UpperCamelCase : Optional[Any] = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
            UpperCamelCase : Any = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE).prev_sample
        return sample

    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Optional[int] = dict(self.forward_default_kwargs)
        UpperCamelCase : Dict = kwargs.pop('''num_inference_steps''', __SCREAMING_SNAKE_CASE)
        for scheduler_class in self.scheduler_classes:
            UpperCamelCase : List[str] = self.get_scheduler_config()
            UpperCamelCase : Tuple = scheduler_class(**__SCREAMING_SNAKE_CASE)
            UpperCamelCase : Tuple = self.dummy_sample
            UpperCamelCase : Union[str, Any] = 0.1 * sample
            if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE, '''set_timesteps'''):
                scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
            elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE, '''set_timesteps'''):
                UpperCamelCase : Optional[Any] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            UpperCamelCase : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            UpperCamelCase : Optional[Any] = dummy_past_residuals[:]
            UpperCamelCase : List[Any] = scheduler.timesteps[5]
            UpperCamelCase : List[Any] = scheduler.timesteps[6]
            UpperCamelCase : int = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            UpperCamelCase : Union[str, Any] = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_a.shape)
            UpperCamelCase : Dict = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            UpperCamelCase : Optional[Any] = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE).prev_sample
            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_a.shape)

    def _lowercase(self):
        """simple docstring"""
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE, time_step=__SCREAMING_SNAKE_CASE)

    def _lowercase(self):
        """simple docstring"""
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE, time_step=__SCREAMING_SNAKE_CASE)

    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Dict = self.full_loop()
        UpperCamelCase : Dict = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
        assert abs(result_mean.item() - 2_540_529) < 10

code_codestyle: 720
style_context:

import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand

try:
    from cookiecutter.main import cookiecutter

    __UpperCAmelCase : List[Any] = True
except ImportError:
    __UpperCAmelCase : List[str] = False

__UpperCAmelCase : Any = logging.get_logger(__name__)  # pylint: disable=invalid-name


def a(SCREAMING_SNAKE_CASE_: Namespace):
    """simple docstring"""
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class UpperCAmelCase_(_a):
    '''simple docstring'''

    @staticmethod
    def _lowercase(__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''')
        add_new_model_parser.add_argument('''--testing''', action='''store_true''', help='''If in testing mode.''')
        add_new_model_parser.add_argument('''--testing_file''', type=__SCREAMING_SNAKE_CASE, help='''Configuration file on which to run.''')
        add_new_model_parser.add_argument(
            '''--path''', type=__SCREAMING_SNAKE_CASE, help='''Path to cookiecutter. Should only be used for testing purposes.'''
        )
        add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE)

    def __init__(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None, *__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : Tuple = testing
        UpperCamelCase : Any = testing_file
        UpperCamelCase : Dict = path

    def _lowercase(self):
        """simple docstring"""
        warnings.warn(
            '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
            '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
            '''checks, you should use `transformers-cli add-new-model-like` instead.'''
        )
        if not _has_cookiecutter:
            raise ImportError(
                '''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
                '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n'''
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
        if len(__SCREAMING_SNAKE_CASE) > 0:
            raise ValueError(
                '''Several directories starting with `cookiecutter-template-` in current working directory. '''
                '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
                '''change your working directory.'''
            )
        UpperCamelCase : Dict = (
            Path(__SCREAMING_SNAKE_CASE).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(__SCREAMING_SNAKE_CASE))
        else:
            with open(self._testing_file, '''r''') as configuration_file:
                UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=__SCREAMING_SNAKE_CASE,
                extra_context=__SCREAMING_SNAKE_CASE,
            )
        UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''', '''r''') as configuration_file:
            UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE)
        UpperCamelCase : Dict = configuration['''lowercase_modelname''']
        UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(f"""{directory}/configuration.json""")
        UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax
        UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(__SCREAMING_SNAKE_CASE, exist_ok=__SCREAMING_SNAKE_CASE)
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""", exist_ok=__SCREAMING_SNAKE_CASE)
        # Tests require submodules as they have parent imports
        with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""", '''w'''):
            pass
        shutil.move(f"""{directory}/__init__.py""", f"""{model_dir}/__init__.py""")
        shutil.move(
            f"""{directory}/configuration_{lowercase_model_name}.py""",
            f"""{model_dir}/configuration_{lowercase_model_name}.py""",
        )

        def remove_copy_lines(__SCREAMING_SNAKE_CASE):
            with open(__SCREAMING_SNAKE_CASE, '''r''') as f:
                UpperCamelCase : Any = f.readlines()
            with open(__SCREAMING_SNAKE_CASE, '''w''') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(__SCREAMING_SNAKE_CASE)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""")
            shutil.move(
                f"""{directory}/modeling_{lowercase_model_name}.py""",
                f"""{model_dir}/modeling_{lowercase_model_name}.py""",
            )
            shutil.move(
                f"""{directory}/test_modeling_{lowercase_model_name}.py""",
                f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""",
            )
        else:
            os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""")
            os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""")
            shutil.move(
                f"""{directory}/modeling_tf_{lowercase_model_name}.py""",
                f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""",
            )
            shutil.move(
                f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""",
                f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""",
            )
        else:
            os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""")
            os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""")
            shutil.move(
                f"""{directory}/modeling_flax_{lowercase_model_name}.py""",
                f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""",
            )
            shutil.move(
                f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""",
                f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""",
            )
        else:
            os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""")
            os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""")

        shutil.move(
            f"""{directory}/{lowercase_model_name}.md""",
            f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""",
        )
        shutil.move(
            f"""{directory}/tokenization_{lowercase_model_name}.py""",
            f"""{model_dir}/tokenization_{lowercase_model_name}.py""",
        )
        shutil.move(
            f"""{directory}/tokenization_fast_{lowercase_model_name}.py""",
            f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
            # Create temp file
            UpperCamelCase, UpperCamelCase : Optional[Any] = mkstemp()
            UpperCamelCase : Tuple = False
            with fdopen(__SCREAMING_SNAKE_CASE, '''w''') as new_file:
                with open(__SCREAMING_SNAKE_CASE) as old_file:
                    for line in old_file:
                        new_file.write(__SCREAMING_SNAKE_CASE)
                        if line_to_copy_below in line:
                            UpperCamelCase : Optional[int] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(__SCREAMING_SNAKE_CASE)
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""")
            # Copy the file permissions from the old file to the new file
            copymode(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
            # Remove original file
            remove(__SCREAMING_SNAKE_CASE)
            # Move new file
            move(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)

        def skip_units(__SCREAMING_SNAKE_CASE):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(__SCREAMING_SNAKE_CASE):
            with open(__SCREAMING_SNAKE_CASE) as datafile:
                UpperCamelCase : int = []
                UpperCamelCase : Dict = False
                UpperCamelCase : List[Any] = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        UpperCamelCase : Dict = line.split('''"''')[1]
                        UpperCamelCase : int = skip_units(__SCREAMING_SNAKE_CASE)
                    elif "# Below: " in line and "##" not in line:
                        UpperCamelCase : Dict = line.split('''"''')[1]
                        UpperCamelCase : List[str] = skip_units(__SCREAMING_SNAKE_CASE)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
                        UpperCamelCase : Dict = []
                    elif "# Replace with" in line and "##" not in line:
                        UpperCamelCase : Tuple = []
                    elif "##" not in line:
                        lines_to_copy.append(__SCREAMING_SNAKE_CASE)
            remove(__SCREAMING_SNAKE_CASE)

        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""")
        os.rmdir(__SCREAMING_SNAKE_CASE)

style_context_codestyle: 643
label: 0
code:

from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class UpperCAmelCase_(_a):
    '''simple docstring'''

    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        """simple docstring"""
        return 0.0


def a(SCREAMING_SNAKE_CASE_: np.ndarray, SCREAMING_SNAKE_CASE_: int):
    """simple docstring"""
    UpperCamelCase : List[str] = min([-2_0, np.min(fft_results[1 : samplerate // 2 - 1])])
    UpperCamelCase : str = max([2_0, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def a(SCREAMING_SNAKE_CASE_: FilterType, SCREAMING_SNAKE_CASE_: int):
    """simple docstring"""
    UpperCamelCase : List[Any] = 5_1_2
    UpperCamelCase : str = [1] + [0] * (size - 1)
    UpperCamelCase : Dict = [filter_type.process(SCREAMING_SNAKE_CASE_) for item in inputs]
    UpperCamelCase : int = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    UpperCamelCase : int = np.abs(np.fft.fft(SCREAMING_SNAKE_CASE_))
    UpperCamelCase : str = 2_0 * np.logaa(SCREAMING_SNAKE_CASE_)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(2_4, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    # Display within reasonable bounds
    UpperCamelCase : str = get_bounds(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
    plt.ylim(max([-8_0, bounds[0]]), min([8_0, bounds[1]]))
    plt.ylabel('''Gain (dB)''')
    plt.plot(SCREAMING_SNAKE_CASE_)
    plt.show()


def a(SCREAMING_SNAKE_CASE_: FilterType, SCREAMING_SNAKE_CASE_: int):
    """simple docstring"""
    UpperCamelCase : Optional[int] = 5_1_2
    UpperCamelCase : Union[str, Any] = [1] + [0] * (size - 1)
    UpperCamelCase : str = [filter_type.process(SCREAMING_SNAKE_CASE_) for item in inputs]
    UpperCamelCase : List[str] = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    UpperCamelCase : List[str] = np.angle(np.fft.fft(SCREAMING_SNAKE_CASE_))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(2_4, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('''Phase shift (Radians)''')
    plt.plot(np.unwrap(SCREAMING_SNAKE_CASE_, -2 * pi))
    plt.show()

code_codestyle: 721
style_context:

from pathlib import Path

import cva
import numpy as np
from matplotlib import pyplot as plt


def a(SCREAMING_SNAKE_CASE_: np.ndarray, SCREAMING_SNAKE_CASE_: np.ndarray, SCREAMING_SNAKE_CASE_: np.ndarray, SCREAMING_SNAKE_CASE_: int, SCREAMING_SNAKE_CASE_: int):
    """simple docstring"""
    UpperCamelCase : str = cva.getAffineTransform(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
    return cva.warpAffine(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, (rows, cols))


if __name__ == "__main__":
    # read original image
    __UpperCAmelCase : Tuple = cva.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    __UpperCAmelCase : int = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    __UpperCAmelCase, __UpperCAmelCase : Tuple = gray_img.shape
    # set different points to rotate image
    __UpperCAmelCase : Optional[int] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    __UpperCAmelCase : Optional[int] = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    __UpperCAmelCase : Any = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    __UpperCAmelCase : int = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    __UpperCAmelCase : Union[str, Any] = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    __UpperCAmelCase : List[str] = plt.figure(1)
    __UpperCAmelCase : Dict = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()

style_context_codestyle: 643
label: 0
code:

import json
import os
import re
import sys
import urllib.request

import requests
from bsa import BeautifulSoup

__UpperCAmelCase : str = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def a(SCREAMING_SNAKE_CASE_: str = "dhaka", SCREAMING_SNAKE_CASE_: int = 5):
    """simple docstring"""
    UpperCamelCase : List[str] = min(SCREAMING_SNAKE_CASE_, 5_0)  # Prevent abuse!
    UpperCamelCase : str = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }
    UpperCamelCase : int = requests.get('''https://www.google.com/search''', params=SCREAMING_SNAKE_CASE_, headers=SCREAMING_SNAKE_CASE_)
    UpperCamelCase : Tuple = BeautifulSoup(html.text, '''html.parser''')
    UpperCamelCase : Union[str, Any] = ''''''.join(
        re.findall(R'''AF_initDataCallback\(([^<]+)\);''', str(soup.select('''script''')))
    )
    UpperCamelCase : int = json.dumps(SCREAMING_SNAKE_CASE_)
    UpperCamelCase : Dict = json.loads(SCREAMING_SNAKE_CASE_)
    UpperCamelCase : Optional[Any] = re.findall(
        R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''',
        SCREAMING_SNAKE_CASE_,
    )
    if not matched_google_image_data:
        return 0
    UpperCamelCase : Dict = re.sub(
        R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''',
        '''''',
        str(SCREAMING_SNAKE_CASE_),
    )
    UpperCamelCase : str = re.findall(
        R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''',
        SCREAMING_SNAKE_CASE_,
    )
    for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE_):
        if index >= max_images:
            return index
        UpperCamelCase : Dict = bytes(SCREAMING_SNAKE_CASE_, '''ascii''').decode('''unicode-escape''')
        UpperCamelCase : Optional[Any] = bytes(SCREAMING_SNAKE_CASE_, '''ascii''').decode('''unicode-escape''')
        UpperCamelCase : Tuple = urllib.request.build_opener()
        UpperCamelCase : Optional[Any] = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(SCREAMING_SNAKE_CASE_)
        UpperCamelCase : int = F"""query_{query.replace(" ", "_")}"""
        if not os.path.exists(SCREAMING_SNAKE_CASE_):
            os.makedirs(SCREAMING_SNAKE_CASE_)
        urllib.request.urlretrieve(  # noqa: S310
            SCREAMING_SNAKE_CASE_, F"""{path_name}/original_size_img_{index}.jpg"""
        )
    return index


if __name__ == "__main__":
    try:
        __UpperCAmelCase : str = download_images_from_google_query(sys.argv[1])
        print(f'''{image_count} images were downloaded to disk.''')
    except IndexError:
        print("Please provide a search term.")
        raise

code_codestyle: 700
style_context:

import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)

__UpperCAmelCase : List[str] = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class UpperCAmelCase_(_a):
    '''simple docstring'''

    __UpperCamelCase : Optional[int] = "conditional_detr"
    __UpperCamelCase : Optional[Any] = ["past_key_values"]
    __UpperCamelCase : Union[str, Any] = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        __SCREAMING_SNAKE_CASE=True,
        __SCREAMING_SNAKE_CASE=None,
        __SCREAMING_SNAKE_CASE=3,
        __SCREAMING_SNAKE_CASE=300,
        __SCREAMING_SNAKE_CASE=6,
        __SCREAMING_SNAKE_CASE=2_048,
        __SCREAMING_SNAKE_CASE=8,
        __SCREAMING_SNAKE_CASE=6,
        __SCREAMING_SNAKE_CASE=2_048,
        __SCREAMING_SNAKE_CASE=8,
        __SCREAMING_SNAKE_CASE=0.0,
        __SCREAMING_SNAKE_CASE=0.0,
        __SCREAMING_SNAKE_CASE=True,
        __SCREAMING_SNAKE_CASE="relu",
        __SCREAMING_SNAKE_CASE=256,
        __SCREAMING_SNAKE_CASE=0.1,
        __SCREAMING_SNAKE_CASE=0.0,
        __SCREAMING_SNAKE_CASE=0.0,
        __SCREAMING_SNAKE_CASE=0.02,
        __SCREAMING_SNAKE_CASE=1.0,
        __SCREAMING_SNAKE_CASE=False,
        __SCREAMING_SNAKE_CASE="sine",
        __SCREAMING_SNAKE_CASE="resnet50",
        __SCREAMING_SNAKE_CASE=True,
        __SCREAMING_SNAKE_CASE=False,
        __SCREAMING_SNAKE_CASE=2,
        __SCREAMING_SNAKE_CASE=5,
        __SCREAMING_SNAKE_CASE=2,
        __SCREAMING_SNAKE_CASE=1,
        __SCREAMING_SNAKE_CASE=1,
        __SCREAMING_SNAKE_CASE=2,
        __SCREAMING_SNAKE_CASE=5,
        __SCREAMING_SNAKE_CASE=2,
        __SCREAMING_SNAKE_CASE=0.25,
        **__SCREAMING_SNAKE_CASE,
    ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                UpperCamelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
                UpperCamelCase : Tuple = backbone_config.get('''model_type''')
                UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
                UpperCamelCase : Any = config_class.from_dict(__SCREAMING_SNAKE_CASE)
        UpperCamelCase : List[str] = use_timm_backbone
        UpperCamelCase : int = backbone_config
        UpperCamelCase : Any = num_channels
        UpperCamelCase : Optional[Any] = num_queries
        UpperCamelCase : Tuple = d_model
        UpperCamelCase : Optional[Any] = encoder_ffn_dim
        UpperCamelCase : Optional[int] = encoder_layers
        UpperCamelCase : Union[str, Any] = encoder_attention_heads
        UpperCamelCase : Optional[Any] = decoder_ffn_dim
        UpperCamelCase : Optional[int] = decoder_layers
        UpperCamelCase : Optional[Any] = decoder_attention_heads
        UpperCamelCase : Any = dropout
        UpperCamelCase : List[Any] = attention_dropout
        UpperCamelCase : List[Any] = activation_dropout
        UpperCamelCase : List[str] = activation_function
        UpperCamelCase : Optional[int] = init_std
        UpperCamelCase : Optional[Any] = init_xavier_std
        UpperCamelCase : Union[str, Any] = encoder_layerdrop
        UpperCamelCase : Optional[Any] = decoder_layerdrop
        UpperCamelCase : Tuple = encoder_layers
        UpperCamelCase : Optional[Any] = auxiliary_loss
        UpperCamelCase : Union[str, Any] = position_embedding_type
        UpperCamelCase : Optional[int] = backbone
        UpperCamelCase : Dict = use_pretrained_backbone
        UpperCamelCase : Tuple = dilation
        # Hungarian matcher
        UpperCamelCase : Union[str, Any] = class_cost
        UpperCamelCase : List[Any] = bbox_cost
        UpperCamelCase : Optional[Any] = giou_cost
        # Loss coefficients
        UpperCamelCase : Optional[Any] = mask_loss_coefficient
        UpperCamelCase : Optional[int] = dice_loss_coefficient
        UpperCamelCase : Optional[Any] = cls_loss_coefficient
        UpperCamelCase : Optional[int] = bbox_loss_coefficient
        UpperCamelCase : Optional[int] = giou_loss_coefficient
        UpperCamelCase : Optional[int] = focal_alpha
        super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE)

    @property
    def _lowercase(self):
        """simple docstring"""
        return self.encoder_attention_heads

    @property
    def _lowercase(self):
        """simple docstring"""
        return self.d_model

    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            UpperCamelCase : List[Any] = self.backbone_config.to_dict()
        UpperCamelCase : List[Any] = self.__class__.model_type
        return output


class UpperCAmelCase_(_a):
    '''simple docstring'''

    __UpperCamelCase : Dict = version.parse("1.11")

    @property
    def _lowercase(self):
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ]
        )

    @property
    def _lowercase(self):
        """simple docstring"""
        return 1e-5

    @property
    def _lowercase(self):
        """simple docstring"""
        return 12

style_context_codestyle: 643
label: 0
code:

import numpy as np

from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch

from .test_feature_extraction_common import FeatureExtractionSavingTestMixin


class UpperCAmelCase_(_a):
    '''simple docstring'''

    __UpperCamelCase : Union[str, Any] = None
    __UpperCamelCase : List[str] = None

    @property
    def _lowercase(self):
        """simple docstring"""
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE, '''feature_size'''))
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE, '''sampling_rate'''))
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE, '''padding_value'''))

    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : int = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_dict)
        UpperCamelCase : Dict = feat_extract.model_input_names[0]
        UpperCamelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(__SCREAMING_SNAKE_CASE) == len(__SCREAMING_SNAKE_CASE) for x, y in zip(__SCREAMING_SNAKE_CASE, processed_features[input_name])))
        UpperCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__SCREAMING_SNAKE_CASE)
        UpperCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs}, tensor_type='''np''')
        UpperCamelCase : Optional[int] = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            UpperCamelCase : List[Any] = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__SCREAMING_SNAKE_CASE)
        UpperCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict)
        UpperCamelCase : str = feat_extract.model_input_names[0]
        UpperCamelCase : Any = BatchFeature({input_name: speech_inputs}, tensor_type='''pt''')
        UpperCamelCase : Union[str, Any] = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            UpperCamelCase : Tuple = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__SCREAMING_SNAKE_CASE)
        UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
        UpperCamelCase : Tuple = feat_extract.model_input_names[0]
        UpperCamelCase : List[Any] = BatchFeature({input_name: speech_inputs}, tensor_type='''tf''')
        UpperCamelCase : Any = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            UpperCamelCase : Union[str, Any] = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _lowercase(self, __SCREAMING_SNAKE_CASE=False):
        """simple docstring"""

        def _inputs_have_equal_length(__SCREAMING_SNAKE_CASE):
            UpperCamelCase : Union[str, Any] = len(input[0])
            for input_slice in input[1:]:
                if len(__SCREAMING_SNAKE_CASE) != length:
                    return False
            return True

        def _inputs_are_equal(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
            if len(__SCREAMING_SNAKE_CASE) != len(__SCREAMING_SNAKE_CASE):
                return False
            for input_slice_a, input_slice_a in zip(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
                if not np.allclose(np.asarray(__SCREAMING_SNAKE_CASE), np.asarray(__SCREAMING_SNAKE_CASE), atol=1e-3):
                    return False
            return True

        UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict)
        UpperCamelCase : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=__SCREAMING_SNAKE_CASE)
        UpperCamelCase : Optional[Any] = feat_extract.model_input_names[0]
        UpperCamelCase : List[Any] = BatchFeature({input_name: speech_inputs})
        UpperCamelCase : Union[str, Any] = self.feat_extract_tester.seq_length_diff
        UpperCamelCase : str = self.feat_extract_tester.max_seq_length + pad_diff
        UpperCamelCase : Any = self.feat_extract_tester.min_seq_length
        UpperCamelCase : Optional[Any] = self.feat_extract_tester.batch_size
        UpperCamelCase : List[str] = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        UpperCamelCase : str = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding=__SCREAMING_SNAKE_CASE)
        UpperCamelCase : Dict = input_a[input_name]
        UpperCamelCase : Union[str, Any] = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''')
        UpperCamelCase : Tuple = input_a[input_name]
        UpperCamelCase : Tuple = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[-1]))
        UpperCamelCase : str = input_a[input_name]
        UpperCamelCase : List[str] = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', return_tensors='''np''')
        UpperCamelCase : Union[str, Any] = input_a[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(__SCREAMING_SNAKE_CASE):
            feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''max_length''')[input_name]
        UpperCamelCase : int = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=__SCREAMING_SNAKE_CASE, return_tensors='''np'''
        )
        UpperCamelCase : Any = input_a[input_name]
        self.assertFalse(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertTrue(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertTrue(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertTrue(_inputs_are_equal(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE))
        self.assertTrue(len(input_a[0]) == pad_min_length)
        self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
        self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        UpperCamelCase : str = feat_extract.pad(__SCREAMING_SNAKE_CASE, pad_to_multiple_of=10)
        UpperCamelCase : List[str] = input_a[input_name]
        UpperCamelCase : int = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', pad_to_multiple_of=10)
        UpperCamelCase : Union[str, Any] = input_a[input_name]
        UpperCamelCase : Optional[int] = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', pad_to_multiple_of=10, max_length=__SCREAMING_SNAKE_CASE
        )
        UpperCamelCase : int = input_a[input_name]
        UpperCamelCase : Union[str, Any] = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', pad_to_multiple_of=10, max_length=__SCREAMING_SNAKE_CASE, return_tensors='''np''',
        )
        UpperCamelCase : Tuple = input_a[input_name]
        self.assertTrue(all(len(__SCREAMING_SNAKE_CASE) % 10 == 0 for x in input_a))
        self.assertTrue(_inputs_are_equal(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE))
        UpperCamelCase : str = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(__SCREAMING_SNAKE_CASE) == expected_mult_pad_length for x in input_a))
        self.assertEqual(input_a.shape[:2], (batch_size, expected_mult_pad_length))
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == feature_size)
        # Check padding value is correct
        UpperCamelCase : List[Any] = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _lowercase(self, __SCREAMING_SNAKE_CASE=False):
        """simple docstring"""

        def _inputs_have_equal_length(__SCREAMING_SNAKE_CASE):
            UpperCamelCase : Optional[int] = len(input[0])
            for input_slice in input[1:]:
                if len(__SCREAMING_SNAKE_CASE) != length:
                    return False
            return True

        def _inputs_are_equal(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
            if len(__SCREAMING_SNAKE_CASE) != len(__SCREAMING_SNAKE_CASE):
                return False
            for input_slice_a, input_slice_a in zip(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
                if not np.allclose(np.asarray(__SCREAMING_SNAKE_CASE), np.asarray(__SCREAMING_SNAKE_CASE), atol=1e-3):
                    return False
            return True

        UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
        UpperCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=__SCREAMING_SNAKE_CASE)
        UpperCamelCase : Tuple = feat_extract.model_input_names[0]
        UpperCamelCase : str = BatchFeature({input_name: speech_inputs})
        # truncate to smallest
        UpperCamelCase : Tuple = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[0]), truncation=__SCREAMING_SNAKE_CASE
        )
        UpperCamelCase : List[Any] = input_a[input_name]
        UpperCamelCase : Dict = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[0]))
        UpperCamelCase : Optional[int] = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertFalse(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        # truncate to smallest with np
        UpperCamelCase : List[str] = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[0]), return_tensors='''np''', truncation=__SCREAMING_SNAKE_CASE,
        )
        UpperCamelCase : List[Any] = input_a[input_name]
        UpperCamelCase : str = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[0]), return_tensors='''np'''
        )
        UpperCamelCase : Union[str, Any] = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        # truncate to middle
        UpperCamelCase : str = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[1]), truncation=__SCREAMING_SNAKE_CASE, return_tensors='''np''',
        )
        UpperCamelCase : Optional[int] = input_a[input_name]
        UpperCamelCase : Optional[int] = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[1]), truncation=__SCREAMING_SNAKE_CASE
        )
        UpperCamelCase : int = input_a[input_name]
        UpperCamelCase : Any = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[1]), return_tensors='''np'''
        )
        UpperCamelCase : Optional[int] = input_a[input_name]
        self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertTrue(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertTrue(_inputs_are_equal(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(__SCREAMING_SNAKE_CASE):
            feat_extract.pad(__SCREAMING_SNAKE_CASE, truncation=__SCREAMING_SNAKE_CASE)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(__SCREAMING_SNAKE_CASE):
            feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', truncation=__SCREAMING_SNAKE_CASE)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(__SCREAMING_SNAKE_CASE):
            feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', truncation=__SCREAMING_SNAKE_CASE)[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(__SCREAMING_SNAKE_CASE):
            feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''max_length''', truncation=__SCREAMING_SNAKE_CASE)[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        UpperCamelCase : List[str] = 12
        UpperCamelCase : str = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[0]), pad_to_multiple_of=__SCREAMING_SNAKE_CASE, truncation=__SCREAMING_SNAKE_CASE,
        )
        UpperCamelCase : str = input_a[input_name]
        UpperCamelCase : Tuple = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=len(speech_inputs[0]), pad_to_multiple_of=__SCREAMING_SNAKE_CASE,
        )
        UpperCamelCase : int = input_a[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        UpperCamelCase : Tuple = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            UpperCamelCase : Union[str, Any] = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))
        self.assertFalse(_inputs_have_equal_length(__SCREAMING_SNAKE_CASE))

    def _lowercase(self):
        """simple docstring"""
        self._check_padding(numpify=__SCREAMING_SNAKE_CASE)

    def _lowercase(self):
        """simple docstring"""
        self._check_padding(numpify=__SCREAMING_SNAKE_CASE)

    def _lowercase(self):
        """simple docstring"""
        self._check_truncation(numpify=__SCREAMING_SNAKE_CASE)

    def _lowercase(self):
        """simple docstring"""
        self._check_truncation(numpify=__SCREAMING_SNAKE_CASE)

    @require_torch
    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict)
        UpperCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : Tuple = feat_extract.model_input_names[0]
        UpperCamelCase : Dict = BatchFeature({input_name: speech_inputs})
        UpperCamelCase : Any = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', return_tensors='''np''')[input_name]
        UpperCamelCase : Any = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', return_tensors='''pt''')[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)

    @require_tf
    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
        UpperCamelCase : int = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : List[Any] = feat_extract.model_input_names[0]
        UpperCamelCase : Tuple = BatchFeature({input_name: speech_inputs})
        UpperCamelCase : Any = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', return_tensors='''np''')[input_name]
        UpperCamelCase : int = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', return_tensors='''tf''')[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)

    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Optional[Any] = self.feat_extract_dict
        UpperCamelCase : Dict = True
        UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**__SCREAMING_SNAKE_CASE)
        UpperCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : str = [len(__SCREAMING_SNAKE_CASE) for x in speech_inputs]
        UpperCamelCase : Dict = feat_extract.model_input_names[0]
        UpperCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs})
        UpperCamelCase : Any = feat_extract.pad(__SCREAMING_SNAKE_CASE, padding='''longest''', return_tensors='''np''')
        self.assertIn('''attention_mask''', __SCREAMING_SNAKE_CASE)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), __SCREAMING_SNAKE_CASE)

    def _lowercase(self):
        """simple docstring"""
        UpperCamelCase : Optional[int] = self.feat_extract_dict
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : Tuple = self.feature_extraction_class(**__SCREAMING_SNAKE_CASE)
        UpperCamelCase : Any = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : List[Any] = [len(__SCREAMING_SNAKE_CASE) for x in speech_inputs]
        UpperCamelCase : Tuple = feat_extract.model_input_names[0]
        UpperCamelCase : int = BatchFeature({input_name: speech_inputs})
        UpperCamelCase : Dict = min(__SCREAMING_SNAKE_CASE)
        UpperCamelCase : int = feat_extract.pad(
            __SCREAMING_SNAKE_CASE, padding='''max_length''', max_length=__SCREAMING_SNAKE_CASE, truncation=__SCREAMING_SNAKE_CASE, return_tensors='''np'''
        )
        self.assertIn('''attention_mask''', __SCREAMING_SNAKE_CASE)
        self.assertListEqual(list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])

code_codestyle: 701
style_context:

import requests
from bsa import BeautifulSoup


def a(SCREAMING_SNAKE_CASE_: str = "AAPL"):
    """simple docstring"""
    UpperCamelCase : Dict = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    UpperCamelCase : Any = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_).text, '''html.parser''')
    UpperCamelCase : Dict = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''', class_=class_).find('''span''').text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')

style_context_codestyle: 643
label: 0
code:

import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features

__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)

__UpperCAmelCase : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__UpperCAmelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class UpperCAmelCase_:
    '''simple docstring'''

    __UpperCamelCase : str = field(
        default=_a, metadata={"help": "Model type selected in the list: " + ", ".join(_a)}
    )
    __UpperCamelCase : str = field(
        default=_a, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    __UpperCamelCase : int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    __UpperCamelCase : int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    __UpperCamelCase : int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    __UpperCamelCase : int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    __UpperCamelCase : bool = field(
        default=_a, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    __UpperCamelCase : bool = field(
        default=_a, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    __UpperCamelCase : float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    __UpperCamelCase : int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    __UpperCamelCase : int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    __UpperCamelCase : int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class UpperCAmelCase_(_a):
    '''simple docstring'''

    __UpperCamelCase : Optional[int] = "train"
    __UpperCamelCase : Union[str, Any] = "dev"


class UpperCAmelCase_(_a):
    '''simple docstring'''

    __UpperCamelCase : SquadDataTrainingArguments
    __UpperCamelCase : List[SquadFeatures]
    __UpperCamelCase : Split
    __UpperCamelCase : bool

    def __init__(
        self,
        __SCREAMING_SNAKE_CASE,
        __SCREAMING_SNAKE_CASE,
        __SCREAMING_SNAKE_CASE=None,
        __SCREAMING_SNAKE_CASE=Split.train,
        __SCREAMING_SNAKE_CASE=False,
        __SCREAMING_SNAKE_CASE=None,
        __SCREAMING_SNAKE_CASE="pt",
    ):
        """simple docstring"""
        UpperCamelCase : Optional[int] = args
        UpperCamelCase : List[str] = is_language_sensitive
        UpperCamelCase : str = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
            try:
                UpperCamelCase : str = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''')
        UpperCamelCase : int = mode
        # Load data features from cache or dataset file
        UpperCamelCase : List[str] = '''v2''' if args.version_2_with_negative else '''v1'''
        UpperCamelCase : Any = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCamelCase : Tuple = cached_features_file + '''.lock'''
        with FileLock(__SCREAMING_SNAKE_CASE):
            if os.path.exists(__SCREAMING_SNAKE_CASE) and not args.overwrite_cache:
                UpperCamelCase : Union[str, Any] = time.time()
                UpperCamelCase : Optional[int] = torch.load(__SCREAMING_SNAKE_CASE)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                UpperCamelCase : Tuple = self.old_features['''features''']
                UpperCamelCase : List[str] = self.old_features.get('''dataset''', __SCREAMING_SNAKE_CASE)
                UpperCamelCase : Optional[int] = self.old_features.get('''examples''', __SCREAMING_SNAKE_CASE)
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
                        ''' future run'''
                    )
            else:
                if mode == Split.dev:
                    UpperCamelCase : Optional[int] = self.processor.get_dev_examples(args.data_dir)
                else:
                    UpperCamelCase : int = self.processor.get_train_examples(args.data_dir)
                UpperCamelCase : Tuple = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=__SCREAMING_SNAKE_CASE,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=__SCREAMING_SNAKE_CASE,
                )
                UpperCamelCase : Union[str, Any] = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples},
                    __SCREAMING_SNAKE_CASE,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"""
                )

    def __len__(self):
        """simple docstring"""
        return len(self.features)

    def __getitem__(self, __SCREAMING_SNAKE_CASE):
        """simple docstring"""
        UpperCamelCase : Tuple = self.features[i]
        UpperCamelCase : Optional[Any] = torch.tensor(feature.input_ids, dtype=torch.long)
        UpperCamelCase : int = torch.tensor(feature.attention_mask, dtype=torch.long)
        UpperCamelCase : Any = torch.tensor(feature.token_type_ids, dtype=torch.long)
        UpperCamelCase : str = torch.tensor(feature.cls_index, dtype=torch.long)
        UpperCamelCase : Tuple = torch.tensor(feature.p_mask, dtype=torch.float)
        UpperCamelCase : int = torch.tensor(feature.is_impossible, dtype=torch.float)
        UpperCamelCase : List[Any] = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask})
        if self.args.version_2_with_negative:
            inputs.update({'''is_impossible''': is_impossible})
        if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa) * self.args.lang_id)})
        if self.mode == Split.train:
            UpperCamelCase : str = torch.tensor(feature.start_position, dtype=torch.long)
            UpperCamelCase : List[Any] = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions})
        return inputs
702
def a ( SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) UpperCamelCase : List[str] = len(bin(SCREAMING_SNAKE_CASE_ )[3:] ) UpperCamelCase : List[str] = bin(abs(SCREAMING_SNAKE_CASE_ ) - (1 << binary_number_length) )[3:] UpperCamelCase : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE_ )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
643
0
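# --- Editor's sketch: an addition, not part of the dataset rows above. ---
# Worked check of the two's-complement helper in the style_context above, with the
# obfuscated name `a` spelled out. For -5: bin(-5)[3:] == '101' (3 magnitude bits),
# abs(-5) - (1 << 3) == -3, bin(-3)[3:] == '11', and the sign bit plus zero padding
# gives '1011' -- i.e. -5 in 4-bit two's complement.
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    bits = len(bin(number)[3:])                # width of the magnitude
    body = bin(abs(number) - (1 << bits))[3:]  # low bits after the wrap-around
    padded = "1" + "0" * (bits - len(body)) + body if number < 0 else "0"
    return "0b" + padded

assert twos_complement(-5) == "0b1011"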
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests __UpperCAmelCase : Any = open # noqa: we just need to have a builtin inside this module to test it properly
703
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase : str = logging.get_logger(__name__) __UpperCAmelCase : Dict = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Optional[int] = "yolos" def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = hidden_size UpperCamelCase : List[Any] = num_hidden_layers UpperCamelCase : int = num_attention_heads UpperCamelCase : Dict = intermediate_size UpperCamelCase : Dict = hidden_act UpperCamelCase : int = hidden_dropout_prob UpperCamelCase : Any = attention_probs_dropout_prob UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : List[Any] = layer_norm_eps UpperCamelCase : int = image_size UpperCamelCase : Any = patch_size UpperCamelCase : str = num_channels UpperCamelCase : str = qkv_bias UpperCamelCase : Tuple = num_detection_tokens UpperCamelCase : List[Any] = use_mid_position_embeddings UpperCamelCase : Dict = auxiliary_loss # Hungarian matcher UpperCamelCase : Optional[Any] = class_cost UpperCamelCase : Union[str, Any] = bbox_cost UpperCamelCase : Any = giou_cost # Loss coefficients UpperCamelCase : List[Any] = bbox_loss_coefficient UpperCamelCase : Union[str, Any] = giou_loss_coefficient UpperCamelCase : Dict = eos_coefficient class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = version.parse("1.11") @property def _lowercase ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _lowercase ( self ): """simple docstring""" return 1e-4 @property def _lowercase ( self ): """simple docstring""" return 12
643
0
import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device __UpperCAmelCase : Dict = False class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' pass @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCamelCase : str = torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = pipe( image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCamelCase : List[Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase : Dict = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
704
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) UpperCamelCase : int = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" UpperCamelCase : List[str] = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] UpperCamelCase : Tuple = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
643
0
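# --- Editor's sketch: an addition, not part of the dataset rows above. ---
# The binary-OR helper in the style_context above (there named `a`) ORs two numbers
# digit by digit after zero-filling to a common width; `'1' in (char_a, char_b)` is
# True exactly when either bit is set. Worked example: 25 -> '11001', 32 -> '100000',
# OR -> '111001' (== 57 == 25 | 32).
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int("1" in pair)) for pair in zip(a_bin.zfill(width), b_bin.zfill(width))
    )

assert binary_or(25, 32) == "0b111001"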
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : Optional[int] = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Optional[int] = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys __UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] ): """simple docstring""" UpperCamelCase : List[str] = k_size // 2 UpperCamelCase , UpperCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center] UpperCamelCase : Dict = 1 / (2 * pi * sigma) * exp(-(square(SCREAMING_SNAKE_CASE_ ) + square(SCREAMING_SNAKE_CASE_ )) / (2 * square(SCREAMING_SNAKE_CASE_ )) ) return g def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" UpperCamelCase , UpperCamelCase : Tuple = image.shape[0], image.shape[1] # dst image height and width UpperCamelCase : str = height - k_size + 1 UpperCamelCase : Optional[int] = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows UpperCamelCase : List[Any] = zeros((dst_height * dst_width, k_size * k_size) ) UpperCamelCase : Tuple = 0 for i, j in product(range(SCREAMING_SNAKE_CASE_ ) , range(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] ) UpperCamelCase : Dict = window row += 1 # turn the kernel into shape(k*k, 1) UpperCamelCase : Optional[int] = gen_gaussian_kernel(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = ravel(SCREAMING_SNAKE_CASE_ ) # reshape and get the dst image UpperCamelCase : Optional[int] = dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ ) return dst if __name__ == "__main__": # read original image __UpperCAmelCase : Union[str, Any] = imread(r"../image_data/lena.jpg") # turn image in gray scale value __UpperCAmelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size __UpperCAmelCase : Optional[int] = gaussian_filter(gray, 3, sigma=1) __UpperCAmelCase : List[Any] = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("gaussian filter with 3x3 mask", gaussianaxa) imshow("gaussian filter with 5x5 mask", gaussianaxa) waitKey()
643
0
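# --- Editor's sketch: an addition, not part of the dataset rows above. ---
# The Gaussian-filter sample above builds its kernel from a centred meshgrid and then
# convolves via im2col. A small self-contained check of the kernel step; the names are
# mine (the dataset row obfuscates them) and the 1 / (2 * pi * sigma) normaliser is
# copied from the sample as-is.
from numpy import exp, mgrid, pi, square

def gen_gaussian_kernel(k_size: int, sigma: float):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    return 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))

kernel = gen_gaussian_kernel(3, sigma=1)
assert kernel.shape == (3, 3)
assert kernel[1, 1] == kernel.max()  # the peak sits at the centre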
'''simple docstring''' import math import tensorflow as tf from packaging import version def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ): """simple docstring""" UpperCamelCase : List[Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ): """simple docstring""" UpperCamelCase : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = tf.cast(math.pi , x.dtype ) UpperCamelCase : int = tf.cast(0.044715 , x.dtype ) UpperCamelCase : List[str] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(SCREAMING_SNAKE_CASE_ , 3 )) )) return x * cdf def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ ) return x * tf.tanh(tf.math.softplus(SCREAMING_SNAKE_CASE_ ) ) def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : Optional[Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = tf.cast(0.044715 , x.dtype ) UpperCamelCase : Optional[Any] = tf.cast(0.7978845608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : Tuple = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def a ( SCREAMING_SNAKE_CASE_ : Dict ): """simple docstring""" return tf.clip_by_value(_gelu(SCREAMING_SNAKE_CASE_ ) , -1_0 , 1_0 ) def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int=-1 ): """simple docstring""" UpperCamelCase : Optional[int] = tf.split(SCREAMING_SNAKE_CASE_ , 2 , axis=SCREAMING_SNAKE_CASE_ ) return a * tf.math.sigmoid(SCREAMING_SNAKE_CASE_ ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ): """simple docstring""" return tf.keras.activations.gelu(SCREAMING_SNAKE_CASE_ , approximate=SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : str = tf.keras.activations.gelu __UpperCAmelCase : str = approximate_gelu_wrap else: __UpperCAmelCase : Union[str, Any] = _gelu __UpperCAmelCase : Union[str, Any] = _gelu_new __UpperCAmelCase : str = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
706
from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def a ( SCREAMING_SNAKE_CASE_ : bool = True , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" if not is_tqdm_available(): raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' ) UpperCamelCase : int = False if main_process_only: UpperCamelCase : int = PartialState().local_process_index == 0 return _tqdm(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , disable=SCREAMING_SNAKE_CASE_ )
643
0
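# --- Editor's sketch: an addition, not part of the dataset rows above. ---
# The TF activations module above keeps a name -> callable registry (ACT2FN) and a
# lookup that raises KeyError listing the valid names. The same pattern in plain
# Python, stripped of the TensorFlow dependency:
import math

ACT2FN = {
    "relu": lambda x: max(x, 0.0),
    "gelu": lambda x: 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0))),
}

def get_activation(name: str):
    if name in ACT2FN:
        return ACT2FN[name]
    raise KeyError(f"function {name} not found in ACT2FN mapping {list(ACT2FN)}")

assert get_activation("gelu")(0.0) == 0.0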
from math import factorial class Dual : '''simple docstring''' def __init__( self , real , rank ): """simple docstring""" self.real = real if isinstance(rank , int ): self.duals = [1] * rank else: self.duals = rank def __repr__( self ): """simple docstring""" return ( f"""{self.real}+""" f"""{"+".join(str(dual )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}""" ) def reduce ( self ): """simple docstring""" cur = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , cur ) def __add__( self , other ): """simple docstring""" if not isinstance(other , Dual ): return Dual(self.real + other , self.duals ) s_dual = self.duals.copy() o_dual = other.duals.copy() if len(s_dual ) > len(o_dual ): o_dual.extend([1] * (len(s_dual ) - len(o_dual )) ) elif len(s_dual ) < len(o_dual ): s_dual.extend([1] * (len(o_dual ) - len(s_dual )) ) new_duals = [] for i in range(len(s_dual ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , new_duals ) __radd__ = __add__ def __sub__( self , other ): """simple docstring""" return self + other * -1 def __mul__( self , other ): """simple docstring""" if not isinstance(other , Dual ): new_duals = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , new_duals ) new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , new_duals ) __rmul__ = __mul__ def __truediv__( self , other ): """simple docstring""" if not isinstance(other , Dual ): new_duals = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , new_duals ) raise ValueError def __floordiv__( self , other ): """simple docstring""" if not isinstance(other , Dual ): new_duals = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , new_duals ) raise ValueError def __pow__( self , n ): """simple docstring""" if n < 0 or isinstance(n , float ): raise ValueError('''power must be a positive integer''' ) if n == 0: return 1 if n == 1: return self x = self for _ in range(n - 1 ): x *= self return x def differentiate ( func , position , order ): """simple docstring""" if not callable(func ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(position , (float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(order , int ): raise ValueError('''differentiate() requires an int as input for order''' ) d = Dual(position , 1 ) result = func(d ) if order == 0: return result.real return result.duals[order - 1] * factorial(order ) if __name__ == "__main__": import doctest doctest.testmod() def f ( y ): """simple docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
707
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase : Any = logging.get_logger(__name__) __UpperCAmelCase : int = "▁" __UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} __UpperCAmelCase : Dict = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } __UpperCAmelCase : Dict = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } __UpperCAmelCase : str = { "ernie-m-base": 514, "ernie-m-large": 514, } __UpperCAmelCase : Optional[int] = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = ["input_ids"] __UpperCamelCase : List[str] = VOCAB_FILES_NAMES __UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : List[str] = RESOURCE_FILES_NAMES def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="utf8" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[str] = do_lower_case UpperCamelCase : Dict = sentencepiece_model_ckpt UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: UpperCamelCase : Optional[Any] = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : int = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE ): id for id in range(self.sp_model.get_piece_size() )} UpperCamelCase : str = {v: k for k, v in self.vocab.items()} def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if text is None: return None UpperCamelCase : str = self.tokenize(__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : str = '''''', [] for i, ch in enumerate(__SCREAMING_SNAKE_CASE ): if ch in self.SP_CHAR_MAPPING: UpperCamelCase : Optional[int] = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Optional[Any] = unicodedata.normalize('''NFKC''' , __SCREAMING_SNAKE_CASE ) if self.is_whitespace(__SCREAMING_SNAKE_CASE ): continue normalized_text += ch 
char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE ) ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = normalized_text, [], 0 if self.do_lower_case: UpperCamelCase : Tuple = text.lower() for token in split_tokens: if token[:1] == "▁": UpperCamelCase : Any = token[1:] UpperCamelCase : Optional[int] = text[offset:].index(__SCREAMING_SNAKE_CASE ) + offset UpperCamelCase : List[Any] = start + len(__SCREAMING_SNAKE_CASE ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) UpperCamelCase : str = end return token_mapping @property def _lowercase ( self ): """simple docstring""" return len(self.vocab ) def _lowercase ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.__dict__.copy() UpperCamelCase : str = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCamelCase : Optional[int] = {} UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c in text) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=0.1 ): """simple docstring""" if self.sp_model_kwargs.get('''enable_sampling''' ) is True: UpperCamelCase : List[str] = True if self.sp_model_kwargs.get('''alpha''' ) is not None: UpperCamelCase : Any = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: UpperCamelCase : Tuple = self.sp_model_kwargs.get('''nbest_size''' ) if not enable_sampling: UpperCamelCase : int = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Optional[Any] = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = [] for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ) and pi != 0: new_pieces.append(__SCREAMING_SNAKE_CASE ) continue else: continue UpperCamelCase : Any = 0 for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCamelCase : Union[str, Any] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCamelCase : Any = i if len(__SCREAMING_SNAKE_CASE ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip() return out_string def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = 
self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip() return out_string def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase : Any = [self.cls_token_id] UpperCamelCase : str = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__SCREAMING_SNAKE_CASE ) == 1: UpperCamelCase : Optional[int] = unicodedata.category(__SCREAMING_SNAKE_CASE ) if cat == "Zs": return True return False def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = {} with io.open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Tuple = line.rstrip('''\n''' ) UpperCamelCase : List[Any] = int(__SCREAMING_SNAKE_CASE ) return token_to_idx def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : Union[str, Any] = 0 if os.path.isdir(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Dict = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: UpperCamelCase : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + 
save_directory with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) UpperCamelCase : List[Any] = token_index writer.write(token + '''\n''' ) index += 1 UpperCamelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''sentencepiece.bpe.model''' ) with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (vocab_file,)
643
0
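# --- Editor's sketch: an addition, not part of the dataset rows above. ---
# Worked check for differentiate() in the dual-number sample above: the demo function
# is f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and differentiate(f, 9, 2)
# should print 30 * 9**4 == 196830.
assert 30 * 9**4 == 196_830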
from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def a ( SCREAMING_SNAKE_CASE_ : bool = True , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" if not is_tqdm_available(): raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' ) UpperCamelCase : int = False if main_process_only: UpperCamelCase : int = PartialState().local_process_index == 0 return _tqdm(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , disable=SCREAMING_SNAKE_CASE_ )
708
from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig __UpperCAmelCase : List[Any] = [ "openmmlab/upernet-convnext-tiny", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring __UpperCAmelCase : List[str] = "UperNetConfig" class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , ): """simple docstring""" super().__init__() UpperCamelCase : str = nn.Convad( in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : int = nn.BatchNormad(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.ReLU() def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = self.conv(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = self.batch_norm(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = self.activation(__SCREAMING_SNAKE_CASE ) return output class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__() UpperCamelCase : List[Any] = [ nn.AdaptiveAvgPoolad(__SCREAMING_SNAKE_CASE ), UperNetConvModule(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = input for layer in self.layers: UpperCamelCase : int = layer(__SCREAMING_SNAKE_CASE ) return hidden_state class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__() UpperCamelCase : List[Any] = pool_scales UpperCamelCase : Dict = align_corners UpperCamelCase : Optional[int] = in_channels UpperCamelCase : Union[str, Any] = channels UpperCamelCase : List[str] = [] for i, pool_scale in enumerate(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , channels=__SCREAMING_SNAKE_CASE ) self.blocks.append(__SCREAMING_SNAKE_CASE ) self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = [] for ppm in self.blocks: UpperCamelCase : List[str] = ppm(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = nn.functional.interpolate( __SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners ) ppm_outs.append(__SCREAMING_SNAKE_CASE ) return ppm_outs class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__() UpperCamelCase : int = config UpperCamelCase : List[str] = config.pool_scales # e.g. (1, 2, 3, 6) UpperCamelCase : Optional[int] = in_channels UpperCamelCase : str = config.hidden_size UpperCamelCase : str = False UpperCamelCase : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module UpperCamelCase : Optional[int] = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) UpperCamelCase : str = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module UpperCamelCase : Union[str, Any] = nn.ModuleList() UpperCamelCase : Union[str, Any] = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer UpperCamelCase : List[Any] = UperNetConvModule(__SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 ) UpperCamelCase : int = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(__SCREAMING_SNAKE_CASE ) self.fpn_convs.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def _lowercase ( self ): """simple docstring""" self.apply(self._init_weights ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = inputs[-1] UpperCamelCase : int = [x] psp_outs.extend(self.psp_modules(__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Any = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 ) UpperCamelCase : Union[str, Any] = self.bottleneck(__SCREAMING_SNAKE_CASE ) return output def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(__SCREAMING_SNAKE_CASE ) ) # build top-down path UpperCamelCase : int = len(__SCREAMING_SNAKE_CASE ) for i in range(used_backbone_levels - 1 , 0 , -1 ): UpperCamelCase : Optional[int] = laterals[i - 1].shape[2:] UpperCamelCase : Optional[Any] = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=__SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners ) # build outputs UpperCamelCase : str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): UpperCamelCase : int = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners ) UpperCamelCase : str = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 ) UpperCamelCase : Tuple = self.fpn_bottleneck(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE ) return output class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 1 ): """simple docstring""" super().__init__() UpperCamelCase : Dict = config UpperCamelCase : Optional[Any] = config.auxiliary_in_channels UpperCamelCase : 
Union[str, Any] = config.auxiliary_channels UpperCamelCase : Union[str, Any] = config.auxiliary_num_convs UpperCamelCase : Optional[Any] = config.auxiliary_concat_input UpperCamelCase : List[str] = in_index UpperCamelCase : Any = (kernel_size // 2) * dilation UpperCamelCase : Optional[Any] = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) ) if self.num_convs == 0: UpperCamelCase : str = nn.Identity() else: UpperCamelCase : Dict = nn.Sequential(*__SCREAMING_SNAKE_CASE ) if self.concat_input: UpperCamelCase : Union[str, Any] = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 ) UpperCamelCase : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def _lowercase ( self ): """simple docstring""" self.apply(self._init_weights ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = encoder_hidden_states[self.in_index] UpperCamelCase : str = self.convs(__SCREAMING_SNAKE_CASE ) if self.concat_input: UpperCamelCase : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE ) return output class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Optional[Any] = UperNetConfig __UpperCamelCase : Optional[int] = "pixel_values" __UpperCamelCase : Dict = True def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def _lowercase ( self ): """simple docstring""" self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : str = value __UpperCAmelCase : List[Any] = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __UpperCAmelCase : Union[str, Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. 
See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", _a, ) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) UpperCamelCase : int = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels ) UpperCamelCase : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC ) def _lowercase ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ): """simple docstring""" UpperCamelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions UpperCamelCase : Tuple = self.backbone.forward_with_filtered_kwargs( __SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = outputs.feature_maps UpperCamelCase : Union[str, Any] = self.decode_head(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = None if self.auxiliary_head is not None: UpperCamelCase : int = self.auxiliary_head(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = nn.functional.interpolate( __SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss UpperCamelCase : Optional[int] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) UpperCamelCase : Tuple = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: UpperCamelCase : Optional[Any] = (logits,) + outputs[1:] else: UpperCamelCase : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( 
loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
643
0
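# --- Editor's sketch: an addition, not part of the dataset rows above. ---
# Hedged usage sketch for the UperNet model defined in the style_context above,
# assuming the upstream Hugging Face transformers API and the checkpoint named in the
# sample; the class and processor names follow that library, not this obfuscated copy.
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
image = Image.new("RGB", (512, 512))  # placeholder input
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, num_labels, height, width)
segmentation = logits.argmax(dim=1)  # per-pixel class ids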
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : Dict = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() ) UpperCamelCase : int = sum([np.prod(p.size() ) for p in model_parameters] ) return params __UpperCAmelCase : List[str] = logging.getLogger(__name__) def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict ): """simple docstring""" if metric == "rouge2": UpperCamelCase : str = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": UpperCamelCase : Optional[Any] = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": UpperCamelCase : int = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ''' function.''' ) UpperCamelCase : Tuple = ModelCheckpoint( dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" return EarlyStopping( monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , ) class UpperCAmelCase_ ( pl.Callback): '''simple docstring''' def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Union[str, Any] = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE ) @rank_zero_only def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True ): """simple docstring""" logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) UpperCamelCase : Union[str, Any] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results UpperCamelCase : List[str] = Path(pl_module.hparams.output_dir ) if type_path == "test": UpperCamelCase : Union[str, Any] = od / '''test_results.txt''' UpperCamelCase : Any = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
UpperCamelCase : Optional[int] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" UpperCamelCase : Tuple = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) generations_file.parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , '''a+''' ) as writer: for key in sorted(__SCREAMING_SNAKE_CASE ): if key in ["log", "progress_bar", "preds"]: continue UpperCamelCase : Any = metrics[key] if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ): UpperCamelCase : Any = val.item() UpperCamelCase : Dict = f"""{key}: {val:.6f}\n""" writer.write(__SCREAMING_SNAKE_CASE ) if not save_generations: return if "preds" in metrics: UpperCamelCase : int = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(__SCREAMING_SNAKE_CASE ) @rank_zero_only def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" try: UpperCamelCase : Optional[int] = pl_module.model.model.num_parameters() except AttributeError: UpperCamelCase : Dict = pl_module.model.num_parameters() UpperCamelCase : int = count_trainable_parameters(__SCREAMING_SNAKE_CASE ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''test''' ) @rank_zero_only def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
709
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration __UpperCAmelCase : Optional[int] = 500000 __UpperCAmelCase , __UpperCAmelCase : Any = os.path.split(__file__) __UpperCAmelCase : int = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): """simple docstring""" UpperCamelCase : Tuple = dataset.map(**SCREAMING_SNAKE_CASE_ ) @get_duration def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Any ): """simple docstring""" UpperCamelCase : int = dataset.filter(**SCREAMING_SNAKE_CASE_ ) def a ( ): """simple docstring""" UpperCamelCase : Optional[int] = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) UpperCamelCase : List[str] = generate_example_dataset( os.path.join(SCREAMING_SNAKE_CASE_ , '''dataset.arrow''' ) , SCREAMING_SNAKE_CASE_ , num_examples=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE_ ) def tokenize(SCREAMING_SNAKE_CASE_ : Dict ): return tokenizer(examples['''text'''] ) UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) with dataset.formatted_as(type='''numpy''' ): UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) with dataset.formatted_as(type='''pandas''' ): UpperCamelCase : int = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) with dataset.formatted_as(type='''torch''' , columns='''numbers''' ): UpperCamelCase : Dict = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ): UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , function=SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = filter(SCREAMING_SNAKE_CASE_ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
643
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) __UpperCAmelCase : int = { "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], "processing_speech_to_text": ["Speech2TextProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = ["Speech2TextTokenizer"] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Optional[int] = ["Speech2TextFeatureExtractor"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : int = [ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Tuple = [ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys __UpperCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
710
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
643
0
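Both package __init__ files in this row follow the same deferred-import idiom: a plain _import_structure dict at module level, and the module replaced by a _LazyModule that imports a submodule only when one of its names is first accessed. A stripped-down sketch of the mechanism (this is not the actual transformers/diffusers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    # Sketch: resolve attributes by importing the owning submodule on demand.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [public names]} into {public name: submodule}
        self._name_to_submodule = {
            obj: submodule for submodule, objects in import_structure.items() for obj in objects
        }
        self.__all__ = list(self._name_to_submodule)

    def __getattr__(self, item):
        if item not in self._name_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_submodule[item]}")
        value = getattr(submodule, item)
        setattr(self, item, value)  # cache so later lookups skip __getattr__
        return value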
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    # Applies _hf_hook.pre_forward before the wrapped method when the hook exists.
    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
711
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
643
0
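The FSNER model above scores query tokens against support tokens with a softmax over temperature-scaled cosine similarities. A tiny 2-D illustration of that scoring (shapes simplified from the model's 4-D layout):

import torch

torch.manual_seed(0)
q = torch.randn(5, 8)  # 5 query token embeddings
s = torch.randn(7, 8)  # 7 support token embeddings

# softmax(T * cos(q, s)) with temperature T = 1, as in Atten()
cos = torch.nn.functional.cosine_similarity(q.unsqueeze(1), s.unsqueeze(0), dim=-1)  # (5, 7)
attn = torch.softmax(1.0 * cos, dim=1)
print(attn.sum(dim=1))  # each query's weights over the support set sum to 1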
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(SCREAMING_SNAKE_CASE_ , '''_dynamo''' ): return False return isinstance(SCREAMING_SNAKE_CASE_ , torch._dynamo.eval_frame.OptimizedModule ) def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : bool = True ): """simple docstring""" UpperCamelCase : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) UpperCamelCase : int = is_compiled_module(SCREAMING_SNAKE_CASE_ ) if is_compiled: UpperCamelCase : Any = model UpperCamelCase : Union[str, Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Union[str, Any] = model.module if not keep_fpaa_wrapper: UpperCamelCase : List[str] = getattr(SCREAMING_SNAKE_CASE_ , '''forward''' ) UpperCamelCase : Optional[Any] = model.__dict__.pop('''_original_forward''' , SCREAMING_SNAKE_CASE_ ) if original_forward is not None: while hasattr(SCREAMING_SNAKE_CASE_ , '''__wrapped__''' ): UpperCamelCase : int = forward.__wrapped__ if forward == original_forward: break UpperCamelCase : Union[str, Any] = forward if getattr(SCREAMING_SNAKE_CASE_ , '''_converted_to_transformer_engine''' , SCREAMING_SNAKE_CASE_ ): convert_model(SCREAMING_SNAKE_CASE_ , to_transformer_engine=SCREAMING_SNAKE_CASE_ ) if is_compiled: UpperCamelCase : Union[str, Any] = model UpperCamelCase : Dict = compiled_model return model def a ( ): """simple docstring""" PartialState().wait_for_everyone() def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any ): """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif PartialState().local_process_index == 0: torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @contextmanager def a ( **SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" for key, value in kwargs.items(): UpperCamelCase : Optional[int] = str(SCREAMING_SNAKE_CASE_ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def a ( SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" if not hasattr(SCREAMING_SNAKE_CASE_ , '''__qualname__''' ) and not hasattr(SCREAMING_SNAKE_CASE_ , '''__name__''' ): UpperCamelCase : str = getattr(SCREAMING_SNAKE_CASE_ , '''__class__''' , SCREAMING_SNAKE_CASE_ ) if hasattr(SCREAMING_SNAKE_CASE_ , '''__qualname__''' ): return obj.__qualname__ if hasattr(SCREAMING_SNAKE_CASE_ , '''__name__''' ): return obj.__name__ return str(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ): """simple docstring""" for key, value in source.items(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Union[str, Any] = destination.setdefault(SCREAMING_SNAKE_CASE_ , {} ) merge_dicts(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : Optional[Any] = value 
return destination def a ( SCREAMING_SNAKE_CASE_ : int = None ): """simple docstring""" if port is None: UpperCamelCase : Dict = 2_9_5_0_0 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('''localhost''', port) ) == 0
712
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : Optional[int] = True __UpperCamelCase : Optional[int] = DebertaTokenizerFast def _lowercase ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase : Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] UpperCamelCase : Tuple = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) UpperCamelCase : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCamelCase : List[Any] = {'''unk_token''': '''[UNK]'''} UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = '''lower newer''' UpperCamelCase : Union[str, Any] = '''lower newer''' return input_text, output_text def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.get_tokenizer() UpperCamelCase : int = '''lower newer''' UpperCamelCase : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] UpperCamelCase : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = tokens + [tokenizer.unk_token] UpperCamelCase : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.get_tokenizer() UpperCamelCase : Optional[Any] = tokenizer('''Hello''' , '''World''' ) UpperCamelCase : List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) UpperCamelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = tokenizer.encode( '''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) 
UpperCamelCase : Union[str, Any] = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) UpperCamelCase : str = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']] # fmt: off UpperCamelCase : int = { '''input_ids''': [ [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on UpperCamelCase : List[str] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE ) for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
643
0
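The accelerate utilities in this row include a recursive merge_dicts; restated compactly, its semantics are that nested dicts merge key-by-key while scalar values in source overwrite destination:

def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


base = {"optimizer": {"lr": 1e-3, "betas": (0.9, 0.999)}, "epochs": 3}
override = {"optimizer": {"lr": 5e-4}, "seed": 42}
print(merge_dicts(override, base))
# {'optimizer': {'lr': 0.0005, 'betas': (0.9, 0.999)}, 'epochs': 3, 'seed': 42}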
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCAmelCase : Union[str, Any] = { "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = [ "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Dict = [ "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[str] = [ "FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxResNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
713
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class UpperCAmelCase_ ( _a): '''simple docstring''' def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file: UpperCamelCase : str = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' ) UpperCamelCase : Optional[int] = input_file.read() UpperCamelCase : Union[str, Any] = regexp.search(__SCREAMING_SNAKE_CASE ) return match def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file: UpperCamelCase : Optional[int] = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL ) UpperCamelCase : Tuple = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCamelCase : Dict = regexp.finditer(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = Path('''./datasets''' ) UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = Path('''./datasets''' ) UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
643
0
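The code-quality test in this row audits every dataset script with two regexes. A standalone check of the open(...)-without-encoding pattern, using the regex verbatim (the sample strings are illustrative):

import re

NO_ENCODING = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")

good = 'with open("data.txt", encoding="utf-8") as f: pass'
bad = 'with open("data.txt") as f: pass'
print(NO_ENCODING.search(good) is None)  # True  -> mentions encoding, passes the audit
print(NO_ENCODING.search(bad) is None)   # False -> flagged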
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , ): """simple docstring""" UpperCamelCase : Optional[Any] = parent UpperCamelCase : Dict = out_indices if out_indices is not None else [4] UpperCamelCase : List[str] = stage_names UpperCamelCase : Any = out_features UpperCamelCase : List[Any] = backbone UpperCamelCase : Tuple = batch_size UpperCamelCase : int = image_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : Optional[int] = use_pretrained_backbone UpperCamelCase : Optional[int] = is_training def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : List[Any] = self.get_config() return config, pixel_values def _lowercase ( self ): """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = TimmBackbone(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = self.prepare_config_and_inputs() UpperCamelCase : List[str] = config_and_inputs UpperCamelCase : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class UpperCAmelCase_ ( _a, _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : str = (TimmBackbone,) if is_torch_available() else () __UpperCamelCase : List[str] = {"feature-extraction": TimmBackbone} if is_torch_available() else {} __UpperCamelCase : str = False __UpperCamelCase : Optional[Any] = False __UpperCamelCase : Union[str, Any] = False __UpperCamelCase : Any = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = TimmBackboneModelTester(self ) UpperCamelCase : Tuple = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = '''resnet18''' UpperCamelCase : List[Any] = '''microsoft/resnet-18''' UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , use_timm_backbone=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , use_timm_backbone=__SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) UpperCamelCase : str = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Safetensors is not supported by timm.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) 
def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : Tuple = [*signature.parameters.keys()] UpperCamelCase : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Dict = True UpperCamelCase : Optional[int] = self.has_attentions # no need to test all models as different heads yield the same functionality UpperCamelCase : Optional[Any] = self.all_model_classes[0] UpperCamelCase : Tuple = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = outputs[0][-1] # Encoder-/Decoder-only models UpperCamelCase : List[Any] = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: UpperCamelCase : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : List[Any] = model(**__SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None UpperCamelCase : Optional[int] = copy.deepcopy(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = None UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Optional[int] = model(**__SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights UpperCamelCase : int = copy.deepcopy(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = False UpperCamelCase : Any = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : int = model(**__SCREAMING_SNAKE_CASE )
714
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : Any = XGLMConfig __UpperCamelCase : Dict = {} __UpperCamelCase : List[str] = "gelu" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=14 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.02 , ): """simple docstring""" UpperCamelCase : Any = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : str = seq_length UpperCamelCase : List[str] = is_training UpperCamelCase : Tuple = use_input_mask UpperCamelCase : Union[str, Any] = use_labels UpperCamelCase : int = vocab_size UpperCamelCase : Optional[int] = d_model UpperCamelCase : Any = num_hidden_layers UpperCamelCase : List[str] = num_attention_heads UpperCamelCase : Optional[Any] = ffn_dim UpperCamelCase : Optional[int] = activation_function UpperCamelCase : List[str] = activation_dropout UpperCamelCase : Any = attention_dropout UpperCamelCase : str = max_position_embeddings UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : int = None UpperCamelCase : Dict = 0 UpperCamelCase : int = 2 UpperCamelCase : Any = 1 def _lowercase ( self ): """simple docstring""" return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) UpperCamelCase : int = None if self.use_input_mask: UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = self.get_config() UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self ): """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Dict = config_and_inputs UpperCamelCase : List[str] = { 
'''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCamelCase : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCamelCase : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCamelCase : Optional[int] = False __UpperCamelCase : List[Any] = False __UpperCamelCase : List[Any] = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = TFXGLMModelTester(self ) UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , n_embd=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() @slow def _lowercase ( self ): """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def _lowercase ( self ): """simple docstring""" super().test_resize_token_embeddings() @require_tf class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @slow def _lowercase ( self , __SCREAMING_SNAKE_CASE=True ): """simple docstring""" UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : List[Any] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off UpperCamelCase : str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on UpperCamelCase : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) UpperCamelCase : Tuple = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' ) UpperCamelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): UpperCamelCase : str = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , seed=[7, 0] ) UpperCamelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = ( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : Tuple = '''left''' # use different length sentences to test batching UpperCamelCase : Any = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] UpperCamelCase : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = inputs['''input_ids'''] UpperCamelCase : Optional[int] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 ) UpperCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids UpperCamelCase : Optional[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 ) UpperCamelCase : str = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids UpperCamelCase : List[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 ) UpperCamelCase : Any = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
643
0
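The XGLM batching test above sets padding_side = 'left' before generating: decoder-only models continue from the last position, so padding must sit on the left to keep the real tokens adjacent to the generation point. A quick way to see the resulting layout (this downloads the same checkpoint the test uses):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/xglm-564M")
tok.padding_side = "left"
batch = tok(["A much longer first prompt for this batch", "Hi"], padding=True, return_tensors="np")
print(batch["attention_mask"][1])  # leading zeros: the padding sits on the left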
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: write through __dict__ to bypass __setattr__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
715
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : int = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : str = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } __UpperCAmelCase : Union[str, Any] = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } # fmt: off __UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = VOCAB_FILES_NAMES __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] __UpperCamelCase : Any = MBartTokenizer __UpperCamelCase : List[int] = [] __UpperCamelCase : List[int] = [] def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Dict = vocab_file UpperCamelCase : List[str] = False if not self.vocab_file else True UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) UpperCamelCase : List[Any] = { lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCamelCase : Dict = src_lang if src_lang is not None else '''en_XX''' UpperCamelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCamelCase : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _lowercase ( self ): """simple docstring""" return self._src_lang @src_lang.setter def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Union[str, Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : str = [self.sep_token_id] UpperCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCamelCase : List[str] = src_lang UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = tgt_lang_id return inputs def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Optional[Any] = src_lang UpperCamelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def _lowercase ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = [] UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code] UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCamelCase : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" 
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code] UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCamelCase : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCamelCase : Optional[int] = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
643
0
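The MBART fast tokenizer above renders its post-processor so that every source sequence ends with </s> plus the active language code, and assigning to src_lang re-renders it. Observed behavior (requires downloading the checkpoint):

from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
ids = tok("UN Chief says there is no military solution in Syria").input_ids
print(tok.convert_ids_to_tokens(ids)[-2:])  # ['</s>', 'en_XX']

tok.src_lang = "ro_RO"  # triggers set_src_lang_special_tokens
ids = tok("Şeful ONU declară că nu există soluţie militară în Siria").input_ids
print(tok.convert_ids_to_tokens(ids)[-1])  # 'ro_RO'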
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __UpperCAmelCase : Union[str, Any] = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' __UpperCamelCase : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __UpperCamelCase : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __UpperCamelCase : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __UpperCamelCase : List[str] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' ) UpperCamelCase : Optional[int] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) UpperCamelCase : Optional[int] = text_classifier('''This is great !''' , top_k=2 ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] ) UpperCamelCase : List[Any] = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}], [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}], ] , ) UpperCamelCase : Optional[Any] = text_classifier('''This is great !''' , top_k=1 ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) # Legacy behavior UpperCamelCase : List[str] = text_classifier('''This is great !''' , return_all_scores=__SCREAMING_SNAKE_CASE ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) UpperCamelCase : int = text_classifier('''This is great !''' , return_all_scores=__SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] ) UpperCamelCase : str = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}], [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}], ] , ) UpperCamelCase : List[str] = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , [ {'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_0''', '''score''': 0.504}, ] , ) @require_torch def _lowercase ( self ): """simple docstring""" import torch 
UpperCamelCase : Optional[int] = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , ) UpperCamelCase : List[Any] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) @require_tf def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' ) UpperCamelCase : Optional[int] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) @slow @require_torch def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = pipeline('''text-classification''' ) UpperCamelCase : Tuple = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) UpperCamelCase : int = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) UpperCamelCase : Tuple = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] ) @slow @require_tf def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = pipeline('''text-classification''' , framework='''tf''' ) UpperCamelCase : Union[str, Any] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) UpperCamelCase : Optional[int] = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) UpperCamelCase : int = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = TextClassificationPipeline(model=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) return text_classifier, ["HuggingFace is in", "This is another test"] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 UpperCamelCase : Tuple = '''HuggingFace is in''' UpperCamelCase : Tuple = text_classifier(__SCREAMING_SNAKE_CASE ) self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) UpperCamelCase : int = ['''HuggingFace is in ''', '''Paris is in France'''] UpperCamelCase : Any = text_classifier(__SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}, {'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['''label'''] in 
model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format UpperCamelCase : int = text_classifier(__SCREAMING_SNAKE_CASE , top_k=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , [[{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] * N, [{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] * N] , ) UpperCamelCase : Tuple = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''} UpperCamelCase : Union[str, Any] = text_classifier(__SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , {'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )} , ) self.assertTrue(outputs['''label'''] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. UpperCamelCase : str = [['''HuggingFace is in ''', '''Paris is in France''']] with self.assertRaises(__SCREAMING_SNAKE_CASE ): text_classifier(__SCREAMING_SNAKE_CASE ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility UpperCamelCase : str = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
716
import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device __UpperCAmelCase : Dict = False class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' pass @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCamelCase : str = torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = pipe( image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCamelCase : List[Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase : Dict = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" return x if y == 0 else greatest_common_divisor(SCREAMING_SNAKE_CASE_ , x % y ) def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" return (x * y) // greatest_common_divisor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : int = 2_0 ): """simple docstring""" UpperCamelCase : Union[str, Any] = 1 for i in range(1 , n + 1 ): UpperCamelCase : int = lcm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return g if __name__ == "__main__": print(f'''{solution() = }''')
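# The three definitions above all collapse to `a` under the obfuscation while their
# bodies still call greatest_common_divisor/lcm, so the file raises NameError as
# written. A self-consistent copy of the same Project Euler #5 (smallest multiple)
# solution, with the original names restored:
def greatest_common_divisor(x: int, y: int) -> int:
    # Euclid's algorithm: gcd(x, y) == gcd(y, x mod y)
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    # The smallest number evenly divisible by every integer in 1..n is lcm(1..n).
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


print(solution())  # 232792560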
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase : Dict = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Any = ["input_features"] def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[str] = n_fft UpperCamelCase : Dict = hop_length UpperCamelCase : Dict = chunk_length UpperCamelCase : List[str] = chunk_length * sampling_rate UpperCamelCase : Dict = self.n_samples // hop_length UpperCamelCase : str = sampling_rate UpperCamelCase : Union[str, Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[str] = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) UpperCamelCase : int = log_spec[:, :-1] UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 ) UpperCamelCase : Any = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ): """simple docstring""" if attention_mask is not None: UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa ) UpperCamelCase : Optional[Any] = [] for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ): UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: UpperCamelCase : Optional[int] = padding_value normed_input_values.append(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) UpperCamelCase : Union[str, Any] = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T] UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding UpperCamelCase : Optional[Any] = self.pad( __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]] if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] else: UpperCamelCase : Dict = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE ) return padded_inputs def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ ) UpperCamelCase : List[str] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
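# A minimal usage sketch for the feature extractor above, which mirrors
# transformers' WhisperFeatureExtractor; the sketch instantiates the upstream
# class directly with its defaults (80 mel bins, 16 kHz sampling, 30 s chunks).
import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()
audio = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at 16 kHz
features = extractor(audio, sampling_rate=16_000, return_tensors="np")
print(features["input_features"].shape)     # (1, 80, 3000): padded out to 30 s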
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __UpperCAmelCase : Any = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCAmelCase_ ( datasets.BuilderConfig): '''simple docstring''' __UpperCamelCase : int = 10000 __UpperCamelCase : Optional[List[str]] = None __UpperCamelCase : Optional[datasets.Features] = None class UpperCAmelCase_ ( datasets.ArrowBasedBuilder): '''simple docstring''' __UpperCamelCase : str = ParquetConfig def _lowercase ( self ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) a_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ): a_ : Tuple = data_files if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): a_ : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a_ : Union[str, Any] = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] a_ : int = [] for split_name, files in data_files.items(): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): a_ : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a_ : List[str] = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as f: a_ : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) ) break splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) ) return splits def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a_ : List[Any] = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema ) return pa_table def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" a_ : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ): with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as f: a_ : List[Any] = pq.ParquetFile(__SCREAMING_SNAKE_CASE ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): a_ : int = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 
1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(__SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(__SCREAMING_SNAKE_CASE )}: {e}""" ) raise
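# The builder above backs the "parquet" loader in datasets; a minimal usage
# sketch (the local file paths are hypothetical):
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={"train": "train.parquet", "test": "test.parquet"},
)
print(ds["train"].features)  # inferred from the Arrow schema of the first file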
import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer __UpperCAmelCase : Dict = logging.get_logger(__name__) __UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : Dict = { "vocab_file": { "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt" ), } } __UpperCAmelCase : Tuple = { "junnyu/roformer_chinese_small": 1536, "junnyu/roformer_chinese_base": 1536, "junnyu/roformer_chinese_char_small": 512, "junnyu/roformer_chinese_char_base": 512, "junnyu/roformer_small_discriminator": 128, "junnyu/roformer_small_generator": 128, } __UpperCAmelCase : Any = { "junnyu/roformer_chinese_small": {"do_lower_case": True}, "junnyu/roformer_chinese_base": {"do_lower_case": True}, "junnyu/roformer_chinese_char_small": {"do_lower_case": True}, "junnyu/roformer_chinese_char_base": {"do_lower_case": True}, "junnyu/roformer_small_discriminator": {"do_lower_case": True}, "junnyu/roformer_small_generator": {"do_lower_case": True}, } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : str = VOCAB_FILES_NAMES __UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase : Any = RoFormerTokenizer def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case or pre_tok_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents ): UpperCamelCase : List[Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) ) UpperCamelCase : Optional[int] = do_lower_case UpperCamelCase : Optional[Any] = strip_accents UpperCamelCase : 
List[Any] = pre_tok_class(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = do_lower_case def __getstate__( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.__dict__.copy() UpperCamelCase : Any = BertPreTokenizer() return state def __setstate__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = d UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab() UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" UpperCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : Dict = [self.sep_token_id] UpperCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : List[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE ) return tuple(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Any = BertPreTokenizer() return super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
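# A minimal usage sketch for the fast RoFormer tokenizer above; loading the
# checkpoint needs the `rjieba` package backing JiebaPreTokenizer (an assumption
# about the environment, not shown in this file). The custom pre-tokenizer
# segments Chinese text before WordPiece, which is why __getstate__ and
# save_pretrained temporarily swap in a picklable BertPreTokenizer.
from transformers import RoFormerTokenizerFast

tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("今天天气非常好。"))  # jieba word segmentation, then WordPiece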
import enum import shutil import sys __UpperCAmelCase : Union[str, Any] = shutil.get_terminal_size() __UpperCAmelCase : Tuple = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} class UpperCAmelCase_ ( enum.Enum): '''simple docstring''' __UpperCamelCase : List[Any] = 0 __UpperCamelCase : Tuple = 1 def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]="" ): """simple docstring""" sys.stdout.write(str(SCREAMING_SNAKE_CASE_ ) + end ) sys.stdout.flush() def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int="" ): """simple docstring""" forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , SCREAMING_SNAKE_CASE_ ) def a ( ): """simple docstring""" forceWrite('''\r''' ) def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" ) def a ( ): """simple docstring""" forceWrite(''' ''' * TERMINAL_WIDTH ) reset_cursor() def a ( ): """simple docstring""" reset_cursor() forceWrite('''-''' * TERMINAL_WIDTH )
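# The helpers above wrap raw ANSI escape sequences, but every definition is
# obfuscated to `a`; here is a small self-contained sketch of the same idea
# (colored output plus cursor movement) with hypothetical names:
import sys

def write_color(content: str, color: int, end: str = "\n") -> None:
    # 31=red, 32=green, 33=yellow, 34=blue (standard SGR color codes)
    sys.stdout.write(f"\u001b[{color}m{content}\u001b[0m" + end)
    sys.stdout.flush()

write_color("selected option", 32)  # green text
sys.stdout.write("\u001b[1A")       # CURSOR_TO_CHAR["UP"] == "A": move up one line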
from __future__ import annotations def a ( SCREAMING_SNAKE_CASE_ : list[int] ): """simple docstring""" if len(SCREAMING_SNAKE_CASE_ ) == 0: return array UpperCamelCase , UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ ) # Compute the variables UpperCamelCase : Union[str, Any] = _max - _min + 1 UpperCamelCase , UpperCamelCase : Optional[Any] = [0] * holes_range, [0] * holes_range # Make the sorting. for i in array: UpperCamelCase : Optional[int] = i - _min UpperCamelCase : Any = i holes_repeat[index] += 1 # Makes the array back by replacing the numbers. UpperCamelCase : str = 0 for i in range(SCREAMING_SNAKE_CASE_ ): while holes_repeat[i] > 0: UpperCamelCase : List[Any] = holes[i] index += 1 holes_repeat[i] -= 1 # Returns the sorted array. return array if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase : Any = input("Enter numbers separated by comma:\n") __UpperCAmelCase : int = [int(x) for x in user_input.split(",")] print(pigeon_sort(unsorted))
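# The obfuscation above drops the bindings for _min/_max/holes/holes_repeat, so the
# sort no longer runs as written; a self-consistent copy of the same pigeonhole sort:
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    for i in array:               # drop each value into its hole and count repeats
        holes[i - _min] = i
        holes_repeat[i - _min] += 1
    index = 0
    for i in range(holes_range):  # write the values back in sorted order
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    return array


print(pigeon_sort([8, 3, 2, 7, 4, 6, 8]))  # [2, 3, 4, 6, 7, 8, 8]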
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : Optional[int] = True __UpperCamelCase : Optional[int] = DebertaTokenizerFast def _lowercase ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase : Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] UpperCamelCase : Tuple = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) UpperCamelCase : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCamelCase : List[Any] = {'''unk_token''': '''[UNK]'''} UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = '''lower newer''' UpperCamelCase : Union[str, Any] = '''lower newer''' return input_text, output_text def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.get_tokenizer() UpperCamelCase : int = '''lower newer''' UpperCamelCase : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] UpperCamelCase : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = tokens + [tokenizer.unk_token] UpperCamelCase : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.get_tokenizer() UpperCamelCase : Optional[Any] = tokenizer('''Hello''' , '''World''' ) UpperCamelCase : List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) UpperCamelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = tokenizer.encode( '''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) 
UpperCamelCase : Union[str, Any] = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) UpperCamelCase : str = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']] # fmt: off UpperCamelCase : int = { '''input_ids''': [ [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on UpperCamelCase : List[str] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE ) for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __UpperCAmelCase : List[Any] = True except ImportError: __UpperCAmelCase : List[str] = False __UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name def a ( SCREAMING_SNAKE_CASE_ : Namespace ): """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class UpperCAmelCase_ ( _a): '''simple docstring''' @staticmethod def _lowercase ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' ) add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' ) add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' ) add_new_model_parser.add_argument( '''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' ) add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = testing UpperCamelCase : Any = testing_file UpperCamelCase : Dict = path def _lowercase ( self ): """simple docstring""" warnings.warn( '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ''' '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ''' '''checks, you should use `transformers-cli add-new-model-like` instead.''' ) if not _has_cookiecutter: raise ImportError( '''Model creation dependencies are required to use the `add_new_model` command. Install them by running ''' '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]] if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError( '''Several directories starting with `cookiecutter-template-` in current working directory. 
''' '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or ''' '''change your working directory.''' ) UpperCamelCase : Dict = ( Path(__SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model''' # Execute cookiecutter if not self._testing: cookiecutter(str(__SCREAMING_SNAKE_CASE ) ) else: with open(self._testing_file , '''r''' ) as configuration_file: UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__SCREAMING_SNAKE_CASE , extra_context=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0] # Retrieve configuration with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file: UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = configuration['''lowercase_modelname'''] UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax'''] os.remove(f"""{directory}/configuration.json""" ) UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}""" os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__SCREAMING_SNAKE_CASE ) # Tests require submodules as they have parent imports with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ): pass shutil.move( f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , ) shutil.move( f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , ) def remove_copy_lines(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f: UpperCamelCase : Any = f.readlines() with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(__SCREAMING_SNAKE_CASE ) if output_pytorch: if not self._testing: remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" ) if output_tensorflow: if not self._testing: remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" ) if output_flax: if not self._testing: remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , ) shutil.move( f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # Create temp file UpperCamelCase , UpperCamelCase : Optional[Any] = mkstemp() UpperCamelCase : Tuple = False with fdopen(__SCREAMING_SNAKE_CASE , '''w''' ) as new_file: with open(__SCREAMING_SNAKE_CASE ) as old_file: for line in old_file: new_file.write(__SCREAMING_SNAKE_CASE ) if line_to_copy_below in line: UpperCamelCase : Optional[int] = True for line_to_copy in lines_to_copy: new_file.write(__SCREAMING_SNAKE_CASE ) if not line_found: raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" ) # Copy the file permissions from the old file to the new file copymode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Remove original file remove(__SCREAMING_SNAKE_CASE ) # Move new file move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def skip_units(__SCREAMING_SNAKE_CASE ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE ) as datafile: UpperCamelCase : int = [] UpperCamelCase : 
Dict = False UpperCamelCase : List[Any] = False for line in datafile: if "# To replace in: " in line and "##" not in line: UpperCamelCase : Dict = line.split('''"''' )[1] UpperCamelCase : int = skip_units(__SCREAMING_SNAKE_CASE ) elif "# Below: " in line and "##" not in line: UpperCamelCase : Dict = line.split('''"''' )[1] UpperCamelCase : List[str] = skip_units(__SCREAMING_SNAKE_CASE ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = [] elif "# Replace with" in line and "##" not in line: UpperCamelCase : Tuple = [] elif "##" not in line: lines_to_copy.append(__SCREAMING_SNAKE_CASE ) remove(__SCREAMING_SNAKE_CASE ) replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" ) os.rmdir(__SCREAMING_SNAKE_CASE )
from __future__ import annotations def a ( SCREAMING_SNAKE_CASE_ : list[float] ): """simple docstring""" UpperCamelCase : Tuple = 0.00 UpperCamelCase : Any = 0 for resistor in resistors: if resistor <= 0: UpperCamelCase : List[Any] = F"""Resistor at index {index} has a negative or zero value!""" raise ValueError(SCREAMING_SNAKE_CASE_ ) first_sum += 1 / float(SCREAMING_SNAKE_CASE_ ) index += 1 return 1 / first_sum def a ( SCREAMING_SNAKE_CASE_ : list[float] ): """simple docstring""" UpperCamelCase : Dict = 0.00 UpperCamelCase : Dict = 0 for resistor in resistors: sum_r += resistor if resistor < 0: UpperCamelCase : Optional[Any] = F"""Resistor at index {index} has a negative value!""" raise ValueError(SCREAMING_SNAKE_CASE_ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
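# A worked example for the two formulas above (names hypothetical, since both
# definitions are obfuscated to `a`): parallel resistance is the reciprocal of
# the summed reciprocals, series resistance is the plain sum.
def resistor_parallel(resistors: list[float]) -> float:
    return 1 / sum(1 / r for r in resistors)


def resistor_series(resistors: list[float]) -> float:
    return sum(resistors)


print(resistor_parallel([3.21389, 2, 3]))  # ~0.873757
print(resistor_series([3.21389, 2, 3]))    # 8.21389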
from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" UpperCamelCase : str = cva.getAffineTransform(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return cva.warpAffine(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (rows, cols) ) if __name__ == "__main__": # read original image __UpperCAmelCase : Tuple = cva.imread( str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg") ) # turn image in gray scale value __UpperCAmelCase : int = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape __UpperCAmelCase , __UpperCAmelCase : Tuple = gray_img.shape # set different points to rotate image __UpperCAmelCase : Optional[int] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) __UpperCAmelCase : Optional[int] = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) __UpperCAmelCase : Any = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) __UpperCAmelCase : int = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list __UpperCAmelCase : Union[str, Any] = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations __UpperCAmelCase : List[str] = plt.figure(1) __UpperCAmelCase : Dict = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, "gray") plt.title(titles[i]) plt.axis("off") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __UpperCAmelCase : Optional[int] = datasets.logging.get_logger(__name__) __UpperCAmelCase : List[str] = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n" __UpperCAmelCase : Any = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n" __UpperCAmelCase : List[Any] = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n" __UpperCAmelCase : Dict = { "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip", "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip", "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip", "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip", "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip", "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip", "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip", "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip", "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip", "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class UpperCAmelCase_ ( datasets.Metric): '''simple docstring''' def _lowercase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if self.config_name == "default": logger.warning( '''Using default BLEURT-Base checkpoint for sequence maximum length 128. 
''' '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' ) UpperCamelCase : int = '''bleurt-base-128''' if self.config_name.lower() in CHECKPOINT_URLS: UpperCamelCase : Optional[Any] = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: UpperCamelCase : Optional[int] = self.config_name.upper() else: raise KeyError( f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer UpperCamelCase : Union[str, Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) UpperCamelCase : Optional[Any] = score.BleurtScorer(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.scorer.score(references=__SCREAMING_SNAKE_CASE , candidates=__SCREAMING_SNAKE_CASE ) return {"scores": scores}
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) __UpperCAmelCase : List[str] = { "microsoft/conditional-detr-resnet-50": ( "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json" ), } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Optional[int] = "conditional_detr" __UpperCamelCase : Optional[Any] = ["past_key_values"] __UpperCamelCase : Union[str, Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.25 , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) UpperCamelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Tuple = backbone_config.get('''model_type''' ) UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type] UpperCamelCase : Any = config_class.from_dict(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = use_timm_backbone UpperCamelCase : int = backbone_config UpperCamelCase : Any = num_channels UpperCamelCase : Optional[Any] = num_queries UpperCamelCase : Tuple = d_model UpperCamelCase : Optional[Any] = encoder_ffn_dim UpperCamelCase : Optional[int] = encoder_layers UpperCamelCase : Union[str, Any] = encoder_attention_heads UpperCamelCase : Optional[Any] = decoder_ffn_dim UpperCamelCase : Optional[int] = decoder_layers UpperCamelCase : Optional[Any] = decoder_attention_heads UpperCamelCase : Any = dropout UpperCamelCase : List[Any] = attention_dropout UpperCamelCase : List[Any] = activation_dropout UpperCamelCase : List[str] = activation_function UpperCamelCase : Optional[int] = init_std UpperCamelCase : Optional[Any] = init_xavier_std UpperCamelCase : Union[str, Any] = encoder_layerdrop UpperCamelCase : Optional[Any] = decoder_layerdrop UpperCamelCase : Tuple = encoder_layers UpperCamelCase : Optional[Any] = auxiliary_loss UpperCamelCase : Union[str, Any] = position_embedding_type UpperCamelCase : Optional[int] = backbone UpperCamelCase : Dict = use_pretrained_backbone UpperCamelCase : Tuple = dilation # Hungarian matcher UpperCamelCase : Union[str, Any] = class_cost UpperCamelCase : List[Any] = bbox_cost UpperCamelCase : Optional[Any] = giou_cost # Loss coefficients UpperCamelCase : Optional[Any] = mask_loss_coefficient UpperCamelCase : Optional[int] = dice_loss_coefficient UpperCamelCase : Optional[Any] = cls_loss_coefficient UpperCamelCase : Optional[int] = bbox_loss_coefficient UpperCamelCase : Optional[int] = giou_loss_coefficient UpperCamelCase : Optional[int] = focal_alpha super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def _lowercase ( self ): """simple docstring""" return self.encoder_attention_heads @property def _lowercase ( self ): """simple docstring""" return self.d_model def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: UpperCamelCase : List[Any] = self.backbone_config.to_dict() UpperCamelCase : List[Any] = self.__class__.model_type return output class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Dict = version.parse("1.11") @property def _lowercase ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _lowercase ( self ): """simple docstring""" return 1e-5 @property def _lowercase ( self ): """simple docstring""" return 12
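# A minimal sketch instantiating the configuration above, which transformers
# ships as ConditionalDetrConfig:
from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig()           # defaults: 300 object queries, d_model=256
print(config.num_queries, config.d_model)  # 300 256
print(config.model_type)                   # "conditional_detr"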
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density with mean mu and std sigma at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
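# Quick check of the density above: the standard normal peaks at x == mu with
# value 1 / sqrt(2 * pi).
print(gaussian(0.0))  # 0.3989422804014327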
import requests from bsa import BeautifulSoup def a ( SCREAMING_SNAKE_CASE_ : str = "AAPL" ): """simple docstring""" UpperCamelCase : Dict = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" UpperCamelCase : Any = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , '''html.parser''' ) UpperCamelCase : Dict = '''My(6px) Pos(r) smartphone_Mt(6px)''' return soup.find('''div''' , class_=class_ ).find('''span''' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) __UpperCAmelCase : List[str] = logging.getLogger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" UpperCamelCase : int = self.layer[current_layer](__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , head_mask[current_layer] ) UpperCamelCase : List[str] = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.", _a, ) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = BertEncoderWithPabee(__SCREAMING_SNAKE_CASE ) self.init_weights() UpperCamelCase : Optional[Any] = 0 UpperCamelCase : Optional[Any] = 0 UpperCamelCase : List[Any] = 0 UpperCamelCase : List[str] = 0 def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = threshold def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = patience def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = 0 UpperCamelCase : Dict = 0 def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = self.inference_layers_num / self.inference_instances_num UpperCamelCase : Any = ( f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =""" f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(__SCREAMING_SNAKE_CASE ) @add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , ): """simple docstring""" if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: UpperCamelCase : List[str] = input_ids.size() elif inputs_embeds is not None: UpperCamelCase : str = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) UpperCamelCase : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: UpperCamelCase : Union[str, Any] = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) if token_type_ids is None: UpperCamelCase : Any = torch.zeros(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: UpperCamelCase : Union[str, Any] = encoder_hidden_states.size() UpperCamelCase : List[Any] = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: UpperCamelCase : Any = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = self.invert_attention_mask(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Any = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] UpperCamelCase : List[Any] = self.get_head_mask(__SCREAMING_SNAKE_CASE , self.config.num_hidden_layers ) UpperCamelCase : int = self.embeddings( input_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = embedding_output if self.training: UpperCamelCase : Any = [] for i in range(self.config.num_hidden_layers ): UpperCamelCase : Dict = self.encoder.adaptive_forward( __SCREAMING_SNAKE_CASE , current_layer=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = self.pooler(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = output_layers[i](output_dropout(__SCREAMING_SNAKE_CASE ) ) res.append(__SCREAMING_SNAKE_CASE ) elif self.patience == 0: # Use all layers for inference UpperCamelCase : int = self.encoder( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Dict = self.pooler(encoder_outputs[0] ) UpperCamelCase : Optional[Any] = [output_layers[self.config.num_hidden_layers - 1](__SCREAMING_SNAKE_CASE )] else: UpperCamelCase : Optional[Any] = 0 UpperCamelCase : Optional[int] = None UpperCamelCase : int = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 UpperCamelCase : Optional[int] = self.encoder.adaptive_forward( __SCREAMING_SNAKE_CASE , current_layer=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = self.pooler(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = output_layers[i](__SCREAMING_SNAKE_CASE ) if regression: UpperCamelCase : Optional[Any] = logits.detach() if patient_result is not None: UpperCamelCase : Dict = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: UpperCamelCase : Optional[int] = 0 else: UpperCamelCase : int = logits.detach().argmax(dim=1 ) if patient_result is not None: UpperCamelCase : List[Any] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(__SCREAMING_SNAKE_CASE ) ): patient_counter += 1 else: UpperCamelCase : str = 0 UpperCamelCase : int = logits if patient_counter == self.patience: break UpperCamelCase : Tuple = [patient_result] 
self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ", _a, ) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = config.num_labels UpperCamelCase : List[Any] = BertModelWithPabee(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob ) UpperCamelCase : Optional[int] = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ): """simple docstring""" UpperCamelCase : Tuple = self.bert( input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) UpperCamelCase : Any = (logits[-1],) if labels is not None: UpperCamelCase : Dict = None UpperCamelCase : int = 0 for ix, logits_item in enumerate(__SCREAMING_SNAKE_CASE ): if self.num_labels == 1: # We are doing regression UpperCamelCase : Any = MSELoss() UpperCamelCase : List[Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: UpperCamelCase : List[Any] = CrossEntropyLoss() UpperCamelCase : int = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: UpperCamelCase : Any = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 UpperCamelCase : List[str] = (total_loss / total_weights,) + outputs return outputs
def twos_complement(number: int) -> str:
    """
    Take in a negative integer and return its two's complement representation
    as a prefixed binary string.
    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
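# Quick sanity checks (added): exercise twos_complement on one more value than
# the doctests cover.
assert twos_complement(-17) == "0b101111"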
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a YOLOS model.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
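# Illustrative usage (added): instantiate the config with an overridden field
# and round-trip it through a dict, as any PretrainedConfig supports.
if __name__ == "__main__":
    config = YolosConfig(num_detection_tokens=50)
    print(config.model_type, config.num_detection_tokens, config.image_size)
    assert YolosConfig.from_dict(config.to_dict()).num_detection_tokens == 50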
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
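# Illustrative usage outside unittest (added): the same benchmark utilities can
# be driven directly; the model name and sizes mirror the tests above.
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    print(PyTorchBenchmark(args).run())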
def binary_or(a: int, b: int) -> str:
    """
    Take in two non-negative integers and return a binary string of their
    bitwise OR.
    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
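# Quick sanity checks (added): edge cases not covered by the doctests above.
assert binary_or(0, 0) == "0b0"
assert binary_or(1, 1) == "0b1"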
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
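# Example invocation (added; the checkpoint and dict paths are placeholders for
# local fairseq artifacts, not real files):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew-tiny.pt \
#       --pytorch_dump_folder_path ./sew-tiny-hf \
#       --dict_path ./dict.ltr.txt \
#       --is_finetuned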
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
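# Quick sanity check (added): the Gaussian kernel is positive and peaks at its
# center (it is deliberately left unnormalised, as the filter above uses it).
if __name__ == "__main__":
    from numpy import unravel_index

    kernel = gen_gaussian_kernel(5, sigma=1)
    assert kernel.shape == (5, 5)
    assert unravel_index(kernel.argmax(), kernel.shape) == (2, 2)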
"""Check that every custom init defines the same objects in `_import_structure` and under TYPE_CHECKING."""

import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except local rank 0.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
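# Illustrative usage (added): with `main_process_only=True` (the first,
# positional argument), only local rank 0 renders the bar; on a single process
# it behaves like plain tqdm. Left commented so importing the module stays
# side-effect free.
#
#   for batch in tqdm(True, range(100), desc="steps"):
#       ...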
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of an XLM model. The defaults yield a configuration similar to
    that of the xlm-mlm-en-2048 architecture.
    """

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
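# Illustrative usage (added): `attribute_map` lets the generic names resolve to
# the XLM-specific ones, and the ONNX config exposes dynamic batch/sequence
# axes for the three inputs.
if __name__ == "__main__":
    config = XLMConfig(n_layers=6)
    print(config.num_hidden_layers, config.hidden_size)  # 6 2048, via attribute_map
    print(list(XLMOnnxConfig(config).inputs.keys()))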
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    """
    Constructs an Ernie-M tokenizer, based on SentencePiece.
    """

    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (vocab_file,)
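# Illustrative usage (added): the paths are placeholders for a local
# SentencePiece checkpoint and vocab file; nothing is downloaded here, so the
# snippet is left commented.
#
#   tokenizer = ErnieMTokenizer(
#       sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt"
#   )
#   print(tokenizer.tokenize("Hello world"))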
643
0
from math import pow, sqrt def a ( *SCREAMING_SNAKE_CASE_ : float ): """simple docstring""" UpperCamelCase : Tuple = len(SCREAMING_SNAKE_CASE_ ) > 0 and all(value > 0.0 for value in values ) return result def a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ): """simple docstring""" return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else ValueError('''Input Error: Molar mass values must be greater than 0.''' ) ) def a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ): """simple docstring""" return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else ValueError( '''Input Error: Molar mass and effusion rate values must be greater than 0.''' ) ) def a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ): """simple docstring""" return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else ValueError( '''Input Error: Molar mass and effusion rate values must be greater than 0.''' ) ) def a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ): """simple docstring""" return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else ValueError( '''Input Error: Molar mass and effusion rate values must be greater than 0.''' ) ) def a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ): """simple docstring""" return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else ValueError( '''Input Error: Molar mass and effusion rate values must be greater than 0.''' ) )
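Since the flattening above collapses every function to the same name `a`, a self-contained check of the underlying relation (Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1)) may be clearer; the molar masses are standard values for H2 and O2:

from math import sqrt

molar_mass_h2, molar_mass_o2 = 2.016, 32.00  # g/mol
# hydrogen effuses roughly four times faster than oxygen
ratio = round(sqrt(molar_mass_o2 / molar_mass_h2), 6)
assert 3.98 < ratio < 3.99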
708
from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig __UpperCAmelCase : List[Any] = [ "openmmlab/upernet-convnext-tiny", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring __UpperCAmelCase : List[str] = "UperNetConfig" class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , ): """simple docstring""" super().__init__() UpperCamelCase : str = nn.Convad( in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : int = nn.BatchNormad(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.ReLU() def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = self.conv(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = self.batch_norm(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = self.activation(__SCREAMING_SNAKE_CASE ) return output class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__() UpperCamelCase : List[Any] = [ nn.AdaptiveAvgPoolad(__SCREAMING_SNAKE_CASE ), UperNetConvModule(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = input for layer in self.layers: UpperCamelCase : int = layer(__SCREAMING_SNAKE_CASE ) return hidden_state class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__() UpperCamelCase : List[Any] = pool_scales UpperCamelCase : Dict = align_corners UpperCamelCase : Optional[int] = in_channels UpperCamelCase : Union[str, Any] = channels UpperCamelCase : List[str] = [] for i, pool_scale in enumerate(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , channels=__SCREAMING_SNAKE_CASE ) self.blocks.append(__SCREAMING_SNAKE_CASE ) self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = [] for ppm in self.blocks: UpperCamelCase : List[str] = ppm(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = nn.functional.interpolate( __SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners ) ppm_outs.append(__SCREAMING_SNAKE_CASE ) return ppm_outs class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__() UpperCamelCase : int = config UpperCamelCase : List[str] = config.pool_scales # e.g. (1, 2, 3, 6) UpperCamelCase : Optional[int] = in_channels UpperCamelCase : str = config.hidden_size UpperCamelCase : str = False UpperCamelCase : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module UpperCamelCase : Optional[int] = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) UpperCamelCase : str = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module UpperCamelCase : Union[str, Any] = nn.ModuleList() UpperCamelCase : Union[str, Any] = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer UpperCamelCase : List[Any] = UperNetConvModule(__SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 ) UpperCamelCase : int = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(__SCREAMING_SNAKE_CASE ) self.fpn_convs.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def _lowercase ( self ): """simple docstring""" self.apply(self._init_weights ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = inputs[-1] UpperCamelCase : int = [x] psp_outs.extend(self.psp_modules(__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Any = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 ) UpperCamelCase : Union[str, Any] = self.bottleneck(__SCREAMING_SNAKE_CASE ) return output def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(__SCREAMING_SNAKE_CASE ) ) # build top-down path UpperCamelCase : int = len(__SCREAMING_SNAKE_CASE ) for i in range(used_backbone_levels - 1 , 0 , -1 ): UpperCamelCase : Optional[int] = laterals[i - 1].shape[2:] UpperCamelCase : Optional[Any] = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=__SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners ) # build outputs UpperCamelCase : str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): UpperCamelCase : int = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners ) UpperCamelCase : str = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 ) UpperCamelCase : Tuple = self.fpn_bottleneck(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE ) return output class UpperCAmelCase_ ( nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 1 ): """simple docstring""" super().__init__() UpperCamelCase : Dict = config UpperCamelCase : Optional[Any] = config.auxiliary_in_channels UpperCamelCase : 
Union[str, Any] = config.auxiliary_channels UpperCamelCase : Union[str, Any] = config.auxiliary_num_convs UpperCamelCase : Optional[Any] = config.auxiliary_concat_input UpperCamelCase : List[str] = in_index UpperCamelCase : Any = (kernel_size // 2) * dilation UpperCamelCase : Optional[Any] = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) ) if self.num_convs == 0: UpperCamelCase : str = nn.Identity() else: UpperCamelCase : Dict = nn.Sequential(*__SCREAMING_SNAKE_CASE ) if self.concat_input: UpperCamelCase : Union[str, Any] = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 ) UpperCamelCase : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def _lowercase ( self ): """simple docstring""" self.apply(self._init_weights ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = encoder_hidden_states[self.in_index] UpperCamelCase : str = self.convs(__SCREAMING_SNAKE_CASE ) if self.concat_input: UpperCamelCase : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE ) return output class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Optional[Any] = UperNetConfig __UpperCamelCase : Optional[int] = "pixel_values" __UpperCamelCase : Dict = True def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def _lowercase ( self ): """simple docstring""" self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : str = value __UpperCAmelCase : List[Any] = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __UpperCAmelCase : Union[str, Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. 
See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", _a, ) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) UpperCamelCase : int = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels ) UpperCamelCase : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC ) def _lowercase ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ): """simple docstring""" UpperCamelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions UpperCamelCase : Tuple = self.backbone.forward_with_filtered_kwargs( __SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = outputs.feature_maps UpperCamelCase : Union[str, Any] = self.decode_head(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = None if self.auxiliary_head is not None: UpperCamelCase : int = self.auxiliary_head(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = nn.functional.interpolate( __SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss UpperCamelCase : Optional[int] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) UpperCamelCase : Tuple = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: UpperCamelCase : Optional[Any] = (logits,) + outputs[1:] else: UpperCamelCase : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( 
loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
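A minimal sketch of the pyramid-pooling step the decode head above performs, assuming PyTorch is installed; the channel counts and scales here are illustrative, not the model's real configuration:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 64, 32, 32)                      # (batch, channels, H, W)
pool_scales = (1, 2, 3, 6)
projs = [nn.Conv2d(64, 16, kernel_size=1) for _ in pool_scales]
outs = [x]
for scale, proj in zip(pool_scales, projs):
    p = F.adaptive_avg_pool2d(x, scale)             # pool to a scale-by-scale grid
    p = proj(p)                                     # 1x1 channel projection
    p = F.interpolate(p, size=x.shape[2:], mode="bilinear", align_corners=False)
    outs.append(p)
fused = torch.cat(outs, dim=1)                      # 64 + 4 * 16 = 128 channels
assert fused.shape == (1, 128, 32, 32)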
643
0
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) __UpperCAmelCase : str = { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json", } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Any = "xlnet" __UpperCamelCase : Optional[Any] = ["mems"] __UpperCamelCase : Dict = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , __SCREAMING_SNAKE_CASE=32_000 , __SCREAMING_SNAKE_CASE=1_024 , __SCREAMING_SNAKE_CASE=24 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="bi" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=-1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="last" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="tanh" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Union[str, Any] = vocab_size UpperCamelCase : Dict = d_model UpperCamelCase : str = n_layer UpperCamelCase : List[Any] = n_head if d_model % n_head != 0: raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" ) UpperCamelCase : List[str] = d_model // n_head UpperCamelCase : Optional[Any] = ff_activation UpperCamelCase : Any = d_inner UpperCamelCase : Optional[int] = untie_r UpperCamelCase : str = attn_type UpperCamelCase : List[str] = initializer_range UpperCamelCase : int = layer_norm_eps UpperCamelCase : List[Any] = dropout UpperCamelCase : Dict = mem_len UpperCamelCase : Optional[Any] = reuse_len UpperCamelCase : List[Any] = bi_data UpperCamelCase : Union[str, Any] = clamp_len UpperCamelCase : str = same_length UpperCamelCase : Union[str, Any] = summary_type UpperCamelCase : List[str] = summary_use_proj UpperCamelCase : Dict = summary_activation UpperCamelCase : Union[str, Any] = summary_last_dropout UpperCamelCase : Tuple = start_n_top UpperCamelCase : int = end_n_top UpperCamelCase : Optional[int] = bos_token_id UpperCamelCase : Union[str, Any] = pad_token_id UpperCamelCase : str = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) UpperCamelCase : Any = kwargs['''use_cache'''] UpperCamelCase : Dict = use_mems_eval UpperCamelCase : Dict = use_mems_train super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def _lowercase ( self ): """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 
@max_position_embeddings.setter def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
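The d_head consistency check in the constructor above is plain integer arithmetic; with this config's defaults (d_model=1_024, n_head=16) it works out as:

d_model, n_head = 1_024, 16
assert d_model % n_head == 0      # enforced by the constructor above
d_head = d_model // n_head        # 64 dimensions per attention head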
709
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration __UpperCAmelCase : Optional[int] = 500000 __UpperCAmelCase , __UpperCAmelCase : Any = os.path.split(__file__) __UpperCAmelCase : int = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): """simple docstring""" UpperCamelCase : Tuple = dataset.map(**SCREAMING_SNAKE_CASE_ ) @get_duration def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Any ): """simple docstring""" UpperCamelCase : int = dataset.filter(**SCREAMING_SNAKE_CASE_ ) def a ( ): """simple docstring""" UpperCamelCase : Optional[int] = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) UpperCamelCase : List[str] = generate_example_dataset( os.path.join(SCREAMING_SNAKE_CASE_ , '''dataset.arrow''' ) , SCREAMING_SNAKE_CASE_ , num_examples=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE_ ) def tokenize(SCREAMING_SNAKE_CASE_ : Dict ): return tokenizer(examples['''text'''] ) UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) with dataset.formatted_as(type='''numpy''' ): UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) with dataset.formatted_as(type='''pandas''' ): UpperCamelCase : int = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) with dataset.formatted_as(type='''torch''' , columns='''numbers''' ): UpperCamelCase : Dict = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ): UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , function=SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = filter(SCREAMING_SNAKE_CASE_ ) # Activate later when the tokenizer supports batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
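`get_duration` comes from a local `utils` module that is not part of this row; a minimal sketch of what such a decorator plausibly does (the real helper's signature may differ):

import time
from functools import wraps

def get_duration(func):
    # return the wall-clock seconds a call took instead of its result,
    # matching how the benchmark above collects timings
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper

@get_duration
def busy(n):
    sum(range(n))

print(busy(1_000_000))  # elapsed seconds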
643
0
from __future__ import annotations def a ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" UpperCamelCase : Optional[int] = 0 UpperCamelCase : int = len(SCREAMING_SNAKE_CASE_ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: UpperCamelCase : Tuple = i + 1 else: UpperCamelCase : List[Any] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
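The scan above only works when `nums` is sorted in ascending order (an unsorted list needs the hash-map two-sum variant instead). A self-contained copy with readable names:

from __future__ import annotations

def two_pointer(nums: list[int], target: int) -> list[int]:
    # move inward from both ends of a sorted list
    i, j = 0, len(nums) - 1
    while i < j:
        s = nums[i] + nums[j]
        if s == target:
            return [i, j]
        if s < target:
            i += 1
        else:
            j -= 1
    return []

assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
assert two_pointer([1, 3, 5], 10) == []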
710
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
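A rough sketch of the optional-dependency guard used above; the real diffusers utilities are more elaborate, so treat the names here as illustrative:

import importlib.util

def is_torch_available() -> bool:
    # probe for the package without importing it
    return importlib.util.find_spec("torch") is not None

class _DummyShapEPipeline:
    # stand-in that fails loudly only when actually instantiated
    def __init__(self, *args, **kwargs):
        raise ImportError("ShapEPipeline requires `torch` and `transformers`.")

if is_torch_available():
    pass  # the real `from .pipeline_shap_e import ShapEPipeline` would run here
else:
    ShapEPipeline = _DummyShapEPipeline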
643
0
from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('''socket.socket''' ) @patch('''builtins.open''' ) def a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ): """simple docstring""" UpperCamelCase : int = Mock() UpperCamelCase : Any = conn, Mock() UpperCamelCase : List[Any] = iter([1, None] ) UpperCamelCase : Optional[Any] = lambda SCREAMING_SNAKE_CASE_ : next(SCREAMING_SNAKE_CASE_ ) # ===== invoke ===== send_file(filename='''mytext.txt''' , testing=SCREAMING_SNAKE_CASE_ ) # ===== assertions ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
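For readers unfamiliar with the stacked decorators above: each @patch swaps out the named attribute for the duration of the call and injects its mock as an argument, bottom decorator first. A tiny self-contained illustration:

from unittest.mock import patch

@patch("os.getcwd")
def check(mock_getcwd):
    mock_getcwd.return_value = "/tmp"
    import os
    assert os.getcwd() == "/tmp"   # patched only for the duration of the call
    mock_getcwd.assert_called_once()

check()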
711
import torch from transformers import AutoModel class UpperCAmelCase_ ( torch.nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ): """simple docstring""" super(__SCREAMING_SNAKE_CASE , self ).__init__() UpperCamelCase : List[str] = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 ) UpperCamelCase : List[Any] = torch.nn.Softmax(dim=1 ) def _lowercase ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ): """simple docstring""" return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = W_supports['''sizes'''].tolist() UpperCamelCase : Optional[int] = W_supports['''start_token_id'''].item() UpperCamelCase : Any = W_supports['''end_token_id'''].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] UpperCamelCase : Union[str, Any] = self.BERT(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = None UpperCamelCase : Any = None UpperCamelCase : Optional[Any] = W_supports['''input_ids'''] == start_token_id UpperCamelCase : Any = W_supports['''input_ids'''] == end_token_id for i, size in enumerate(__SCREAMING_SNAKE_CASE ): if i == 0: UpperCamelCase : Optional[int] = 0 else: UpperCamelCase : Tuple = support_sizes[i - 1] UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]] UpperCamelCase : List[str] = S[s : s + size][end_token_masks[s : s + size]] UpperCamelCase : Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) UpperCamelCase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: UpperCamelCase : List[str] = torch.vstack((p_starts, p_start) ) UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) ) else: UpperCamelCase : str = p_start UpperCamelCase : Optional[int] = p_end return p_starts, p_ends
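The scoring above is a temperature-scaled cosine similarity pushed through a softmax; in isolation (shapes illustrative, assuming PyTorch):

import torch

cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-8)
softmax = torch.nn.Softmax(dim=1)
q = torch.randn(2, 5, 8)            # (batch, tokens, hidden)
s = torch.randn(2, 5, 8)
T = 1.0                             # temperature
scores = softmax(T * cos(q, s))     # (2, 5), each row sums to 1
assert torch.allclose(scores.sum(dim=1), torch.ones(2))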
643
0
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase : int = logging.get_logger(__name__) __UpperCAmelCase : Tuple = "▁" __UpperCAmelCase : Tuple = {"vocab_file": "prophetnet.tokenizer"} __UpperCAmelCase : Optional[int] = { "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } __UpperCAmelCase : List[Any] = { "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } __UpperCAmelCase : List[str] = { "microsoft/xprophetnet-large-wiki100-cased": 512, } def a ( SCREAMING_SNAKE_CASE_ : Any ): """simple docstring""" UpperCamelCase : Optional[Any] = collections.OrderedDict() with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' ) as reader: UpperCamelCase : Optional[Any] = reader.readlines() for index, token in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Dict = token.rstrip('''\n''' ) UpperCamelCase : str = index return vocab class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Any = ["input_ids", "attention_mask"] def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Any = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab UpperCamelCase : Dict = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4} for i in range(10 ): UpperCamelCase : Any = f"""[unused{i}]""" UpperCamelCase : Dict = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab UpperCamelCase : Tuple = 12 UpperCamelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(__SCREAMING_SNAKE_CASE ) def __getstate__( self ): """simple docstring""" UpperCamelCase : Tuple = self.__dict__.copy() UpperCamelCase : List[Any] = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCamelCase : Tuple = {} UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is None: return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : Optional[Any] = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowercase ( self ): """simple docstring""" return len(self.sp_model ) + self.fairseq_offset def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase : Tuple = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip() return out_string def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f"""Vocabulary path ({save_directory}) should be a 
directory""" ) return UpperCamelCase : int = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: UpperCamelCase : int = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.sep_token_id] UpperCamelCase : Tuple = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
712
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : Optional[int] = True __UpperCamelCase : Optional[int] = DebertaTokenizerFast def _lowercase ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase : Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] UpperCamelCase : Tuple = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) UpperCamelCase : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCamelCase : List[Any] = {'''unk_token''': '''[UNK]'''} UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = '''lower newer''' UpperCamelCase : Union[str, Any] = '''lower newer''' return input_text, output_text def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.get_tokenizer() UpperCamelCase : int = '''lower newer''' UpperCamelCase : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] UpperCamelCase : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = tokens + [tokenizer.unk_token] UpperCamelCase : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.get_tokenizer() UpperCamelCase : Optional[Any] = tokenizer('''Hello''' , '''World''' ) UpperCamelCase : List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) UpperCamelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = tokenizer.encode( '''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) 
UpperCamelCase : Union[str, Any] = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) UpperCamelCase : str = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']] # fmt: off UpperCamelCase : int = { '''input_ids''': [ [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on UpperCamelCase : List[str] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE ) for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
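The setUp fixture above boils down to writing two small files in the GPT-2 BPE format; a condensed sketch with toy contents:

import json
import os
import tempfile

vocab = {"l": 0, "o": 1, "w": 2, "er": 3, "[UNK]": 4}   # token -> id
merges = ["#version: 0.2", "e r"]                        # ranked BPE merges
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
    fp.write(json.dumps(vocab) + "\n")
with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))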
643
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase : Optional[Any] = { "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Union[str, Any] = ["MobileViTFeatureExtractor"] __UpperCAmelCase : Optional[int] = ["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = [ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[str] = [ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
713
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class UpperCAmelCase_ ( _a): '''simple docstring''' def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file: UpperCamelCase : str = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' ) UpperCamelCase : Optional[int] = input_file.read() UpperCamelCase : Union[str, Any] = regexp.search(__SCREAMING_SNAKE_CASE ) return match def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file: UpperCamelCase : Optional[int] = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL ) UpperCamelCase : Tuple = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCamelCase : Dict = regexp.finditer(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = Path('''./datasets''' ) UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = Path('''./datasets''' ) UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
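A quick demonstration of the first pattern above (the lookbehind requires whitespace before `open`, and the negative lookahead vetoes lines that mention an encoding or a binary mode):

import re

no_encoding = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert no_encoding.search(" open('data.txt')") is not None                # flagged
assert no_encoding.search(" open('data.txt', encoding='utf-8')") is None  # passes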
643
0
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : str = BlenderbotConfig __UpperCamelCase : str = {} __UpperCamelCase : List[Any] = "gelu" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=20 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , ): """simple docstring""" UpperCamelCase : Tuple = parent UpperCamelCase : str = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : List[str] = is_training UpperCamelCase : int = use_labels UpperCamelCase : Optional[int] = vocab_size UpperCamelCase : Any = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Dict = num_attention_heads UpperCamelCase : List[str] = intermediate_size UpperCamelCase : List[Any] = hidden_dropout_prob UpperCamelCase : List[str] = attention_probs_dropout_prob UpperCamelCase : int = max_position_embeddings UpperCamelCase : List[str] = eos_token_id UpperCamelCase : str = pad_token_id UpperCamelCase : Tuple = bos_token_id def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCamelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase : Any = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Dict = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = TFBlenderbotModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() UpperCamelCase : List[str] = inputs_dict['''input_ids'''] UpperCamelCase : List[Any] = input_ids[:1, :] UpperCamelCase : Tuple = inputs_dict['''attention_mask'''][:1, :] UpperCamelCase : Optional[int] = 
inputs_dict['''head_mask'''] UpperCamelCase : Tuple = 1 # first forward pass UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCamelCase : Dict = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCamelCase : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCamelCase : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCamelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx] UpperCamelCase : str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1e-3 ) def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , ): """simple docstring""" if attention_mask is None: UpperCamelCase : Dict = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCamelCase : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCamelCase : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCamelCase : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[int] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __UpperCamelCase : Optional[Any] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __UpperCamelCase : Union[str, Any] = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __UpperCamelCase : Tuple = True __UpperCamelCase : List[str] = False __UpperCamelCase : List[str] = False def _lowercase ( self ): """simple 
docstring""" UpperCamelCase : int = TFBlenderbotModelTester(self ) UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_tokenizers @require_tf class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[Any] = ["My friends are cool but they eat too many carbs."] __UpperCamelCase : Any = "facebook/blenderbot-400M-distill" @cached_property def _lowercase ( self ): """simple docstring""" return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.tokenizer(self.src_text , return_tensors='''tf''' ) UpperCamelCase : Optional[Any] = self.model.generate( model_inputs.input_ids , ) UpperCamelCase : int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__SCREAMING_SNAKE_CASE )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
714
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : Any = XGLMConfig __UpperCamelCase : Dict = {} __UpperCamelCase : List[str] = "gelu" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=14 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.02 , ): """simple docstring""" UpperCamelCase : Any = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : str = seq_length UpperCamelCase : List[str] = is_training UpperCamelCase : Tuple = use_input_mask UpperCamelCase : Union[str, Any] = use_labels UpperCamelCase : int = vocab_size UpperCamelCase : Optional[int] = d_model UpperCamelCase : Any = num_hidden_layers UpperCamelCase : List[str] = num_attention_heads UpperCamelCase : Optional[Any] = ffn_dim UpperCamelCase : Optional[int] = activation_function UpperCamelCase : List[str] = activation_dropout UpperCamelCase : Any = attention_dropout UpperCamelCase : str = max_position_embeddings UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : int = None UpperCamelCase : Dict = 0 UpperCamelCase : int = 2 UpperCamelCase : Any = 1 def _lowercase ( self ): """simple docstring""" return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) UpperCamelCase : int = None if self.use_input_mask: UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = self.get_config() UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self ): """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Dict = config_and_inputs UpperCamelCase : List[str] = { 
'''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCamelCase : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCamelCase : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCamelCase : Optional[int] = False __UpperCamelCase : List[Any] = False __UpperCamelCase : List[Any] = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = TFXGLMModelTester(self ) UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , n_embd=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() @slow def _lowercase ( self ): """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def _lowercase ( self ): """simple docstring""" super().test_resize_token_embeddings() @require_tf class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @slow def _lowercase ( self , __SCREAMING_SNAKE_CASE=True ): """simple docstring""" UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : List[Any] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off UpperCamelCase : str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on UpperCamelCase : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) UpperCamelCase : Tuple = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' ) UpperCamelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): UpperCamelCase : str = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , seed=[7, 0] ) UpperCamelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = ( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : Tuple = '''left''' # use different length sentences to test batching UpperCamelCase : Any = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] UpperCamelCase : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = inputs['''input_ids'''] UpperCamelCase : Optional[int] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 ) UpperCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids UpperCamelCase : Optional[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 ) UpperCamelCase : str = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids UpperCamelCase : List[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 ) UpperCamelCase : Any = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
def solution(n: int = 1_000) -> int:
    """Return the largest product a * b * c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f'''{solution() = }''')
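# Why the closed form for b works: substituting c = n - a - b into
# a**2 + b**2 == c**2 and expanding gives 0 = n*n - 2*a*n - 2*b*n + 2*a*b,
# i.e. b = (n*n - 2*a*n) / (2*n - 2*a), which is the integer division in the
# loop above (the c*c check then filters out inexact divisions).
if __name__ == "__main__":
    # A minimal sanity check: for n = 12 the only triplet is (3, 4, 5).
    assert solution(12) == 3 * 4 * 5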
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : int = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : str = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } __UpperCAmelCase : Union[str, Any] = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } # fmt: off __UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = VOCAB_FILES_NAMES __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] __UpperCamelCase : Any = MBartTokenizer __UpperCamelCase : List[int] = [] __UpperCamelCase : List[int] = [] def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Dict = vocab_file UpperCamelCase : List[str] = False if not self.vocab_file else True UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) UpperCamelCase : List[Any] = { lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCamelCase : Dict = src_lang if src_lang is not None else '''en_XX''' UpperCamelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCamelCase : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _lowercase ( self ): """simple docstring""" return self._src_lang @src_lang.setter def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Union[str, Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : str = [self.sep_token_id] UpperCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCamelCase : List[str] = src_lang UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = tgt_lang_id return inputs def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Optional[Any] = src_lang UpperCamelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def _lowercase ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = [] UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code] UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCamelCase : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" 
        self.cur_lang_code = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str,
            pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def _lowercase(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
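if __name__ == "__main__":
    # A hedged usage sketch of the language-code template wired up above.
    # It assumes Hub access and that this class is the one transformers
    # exports as MBartTokenizerFast:
    from transformers import MBartTokenizerFast

    tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
    ids = tok("Hello world").input_ids
    # With prefix_tokens == [] and suffix_tokens == [eos, lang_code], an
    # encoded source sentence ends with eos followed by the language code:
    print(tok.convert_ids_to_tokens(ids)[-2:])  # ['</s>', 'en_XX']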
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class UpperCAmelCase_(unittest.TestCase):
    '''simple docstring'''

    pass


@slow
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    '''simple docstring'''

    def _lowercase(self):
        """simple docstring"""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg'''
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type='''numpy''',
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ): """simple docstring""" UpperCamelCase : Dict = parent UpperCamelCase : Dict = 100 UpperCamelCase : Tuple = batch_size UpperCamelCase : Union[str, Any] = image_size UpperCamelCase : Tuple = patch_size UpperCamelCase : Union[str, Any] = num_channels UpperCamelCase : Dict = is_training UpperCamelCase : List[Any] = use_labels UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : List[Any] = num_hidden_layers UpperCamelCase : Tuple = num_attention_heads UpperCamelCase : List[Any] = intermediate_size UpperCamelCase : Dict = hidden_act UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : List[Any] = attention_probs_dropout_prob UpperCamelCase : Optional[int] = type_sequence_label_size UpperCamelCase : int = initializer_range UpperCamelCase : str = scope UpperCamelCase : List[Any] = out_indices UpperCamelCase : Tuple = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase : List[Any] = (image_size // patch_size) ** 2 UpperCamelCase : List[Any] = num_patches + 1 def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : Dict = None UpperCamelCase : Optional[Any] = None if self.use_labels: UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCamelCase : List[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def _lowercase ( self ): """simple docstring""" return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = BeitModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = BeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = self.type_sequence_label_size UpperCamelCase : Optional[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase : Any = 1 UpperCamelCase : Optional[int] = BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = self.num_labels UpperCamelCase : Dict = BeitForSemanticSegmentation(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() UpperCamelCase : str = config_and_inputs UpperCamelCase : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Union[str, Any] = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) __UpperCamelCase : Any = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if 
is_torch_available() else {} ) __UpperCamelCase : List[str] = False __UpperCamelCase : Dict = False __UpperCamelCase : Tuple = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = BeitModelTester(self ) UpperCamelCase : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''BEiT does not use inputs_embeds''' ) def _lowercase ( self ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : List[Any] = [*signature.parameters.keys()] UpperCamelCase : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" if not self.model_tester.is_training: return UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Tuple = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]: continue UpperCamelCase : str = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.train() UpperCamelCase : Dict = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = model(**__SCREAMING_SNAKE_CASE ).loss loss.backward() def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCamelCase : int = False UpperCamelCase : Optional[Any] = True for model_class 
in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCamelCase : str = model_class(__SCREAMING_SNAKE_CASE ) model.gradient_checkpointing_enable() model.to(__SCREAMING_SNAKE_CASE ) model.train() UpperCamelCase : Optional[int] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = model(**__SCREAMING_SNAKE_CASE ).loss loss.backward() def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Any = _config_zero_init(__SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: UpperCamelCase : Optional[Any] = model_class(config=__SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def _lowercase ( self ): """simple docstring""" for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Union[str, Any] = BeitModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def a ( ): """simple docstring""" UpperCamelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @cached_property def _lowercase ( self ): """simple docstring""" return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = self.default_image_processor UpperCamelCase : Any = prepare_img() UpperCamelCase : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(__SCREAMING_SNAKE_CASE ) # prepare bool_masked_pos UpperCamelCase : Tuple = torch.ones((1, 196) , dtype=torch.bool ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase : List[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = outputs.logits # verify the logits UpperCamelCase : int = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-2 ) ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = self.default_image_processor UpperCamelCase : List[str] = prepare_img() UpperCamelCase : List[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): 
UpperCamelCase : Dict = model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = outputs.logits # verify the logits UpperCamelCase : Dict = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) UpperCamelCase : Union[str, Any] = 281 self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to( __SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = self.default_image_processor UpperCamelCase : Dict = prepare_img() UpperCamelCase : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase : Any = model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = outputs.logits # verify the logits UpperCamelCase : Union[str, Any] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) UpperCamelCase : List[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) UpperCamelCase : List[str] = model.to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=640 , do_center_crop=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCamelCase : Any = Image.open(ds[0]['''file'''] ) UpperCamelCase : List[str] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase : List[Any] = model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = outputs.logits # verify the logits UpperCamelCase : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' ) if is_pillow_less_than_a: UpperCamelCase : Optional[Any] = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ] , device=__SCREAMING_SNAKE_CASE , ) else: UpperCamelCase : Any = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ] , device=__SCREAMING_SNAKE_CASE , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = 
BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) UpperCamelCase : str = model.to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=640 , do_center_crop=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCamelCase : Dict = Image.open(ds[0]['''file'''] ) UpperCamelCase : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = outputs.logits.detach().cpu() UpperCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)] ) UpperCamelCase : Tuple = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase : Dict = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Any = ["input_features"] def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[str] = n_fft UpperCamelCase : Dict = hop_length UpperCamelCase : Dict = chunk_length UpperCamelCase : List[str] = chunk_length * sampling_rate UpperCamelCase : Dict = self.n_samples // hop_length UpperCamelCase : str = sampling_rate UpperCamelCase : Union[str, Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[str] = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) UpperCamelCase : int = log_spec[:, :-1] UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 ) UpperCamelCase : Any = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ): """simple docstring""" if attention_mask is not None: UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa ) UpperCamelCase : Optional[Any] = [] for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ): UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: UpperCamelCase : Optional[int] = padding_value normed_input_values.append(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) UpperCamelCase : Union[str, Any] = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T] UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding UpperCamelCase : Optional[Any] = self.pad( __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]] if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] else: UpperCamelCase : Dict = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE ) return padded_inputs def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ ) UpperCamelCase : List[str] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
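if __name__ == "__main__":
    # A minimal usage sketch of the extractor above. The defaults (80 mel bins,
    # 16 kHz, hop 160, 30 s chunks) match Whisper-style extraction. The class
    # name below is the masked placeholder used in this file, and the sketch
    # assumes its masked dtype aliases (e.g. np.floataa) resolve to the originals.
    import numpy as np

    fe = UpperCAmelCase_()
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    feats = fe(audio, sampling_rate=16_000, return_tensors="np")
    print(feats["input_features"].shape)  # expected (1, 80, 3000): mel bins x frames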
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer __UpperCAmelCase : Dict = logging.get_logger(__name__) __UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : Dict = { "vocab_file": { "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt" ), } } __UpperCAmelCase : Tuple = { "junnyu/roformer_chinese_small": 1536, "junnyu/roformer_chinese_base": 1536, "junnyu/roformer_chinese_char_small": 512, "junnyu/roformer_chinese_char_base": 512, "junnyu/roformer_small_discriminator": 128, "junnyu/roformer_small_generator": 128, } __UpperCAmelCase : Any = { "junnyu/roformer_chinese_small": {"do_lower_case": True}, "junnyu/roformer_chinese_base": {"do_lower_case": True}, "junnyu/roformer_chinese_char_small": {"do_lower_case": True}, "junnyu/roformer_chinese_char_base": {"do_lower_case": True}, "junnyu/roformer_small_discriminator": {"do_lower_case": True}, "junnyu/roformer_small_generator": {"do_lower_case": True}, } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : str = VOCAB_FILES_NAMES __UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase : Any = RoFormerTokenizer def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case or pre_tok_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents ): UpperCamelCase : List[Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) ) UpperCamelCase : Optional[int] = do_lower_case UpperCamelCase : Optional[Any] = strip_accents UpperCamelCase : 
List[Any] = pre_tok_class(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = do_lower_case def __getstate__( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.__dict__.copy() UpperCamelCase : Any = BertPreTokenizer() return state def __setstate__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = d UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab() UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" UpperCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : Dict = [self.sep_token_id] UpperCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : List[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE ) return tuple(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Any = BertPreTokenizer() return super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
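if __name__ == "__main__":
    # Why the __getstate__/__setstate__ hooks above exist: a PreTokenizer.custom
    # wrapper (here, the jieba-based pre-tokenizer) cannot be serialized, so
    # pickling swaps in a plain BertPreTokenizer and rebuilds the custom one on
    # load. A hedged round-trip sketch, assuming Hub access and that this class
    # is the one transformers exports as RoFormerTokenizerFast:
    import pickle

    from transformers import RoFormerTokenizerFast

    tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    restored = pickle.loads(pickle.dumps(tok))  # works because of the state hooks
    print(type(restored).__name__)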
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase : Dict = logging.get_logger(__name__) __UpperCAmelCase : Dict = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __UpperCAmelCase : Union[str, Any] = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } __UpperCAmelCase : Union[str, Any] = {"facebook/blenderbot_small-90M": 512} def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : int = set() UpperCamelCase : str = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase : Tuple = char UpperCamelCase : Dict = set(SCREAMING_SNAKE_CASE_ ) return pairs class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : int = ["input_ids", "attention_mask"] def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="__start__" , __SCREAMING_SNAKE_CASE="__end__" , __SCREAMING_SNAKE_CASE="__unk__" , __SCREAMING_SNAKE_CASE="__null__" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle: UpperCamelCase : str = json.load(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()} with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle: UpperCamelCase : Dict = merges_handle.read().split('''\n''' )[1:-1] UpperCamelCase : List[str] = [tuple(merge.split() ) for merge in merges] UpperCamelCase : Any = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) UpperCamelCase : Optional[int] = {} @property def _lowercase ( self ): """simple docstring""" return len(self.encoder ) def _lowercase ( self ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase : int = re.sub('''([.,!?()])''' , R''' \1''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = re.sub('''(\')''' , R''' \1 ''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = re.sub(R'''\s{2,}''' , ''' ''' , __SCREAMING_SNAKE_CASE ) if "\n" in token: UpperCamelCase : Tuple = token.replace('''\n''' , ''' __newln__''' ) UpperCamelCase : int = token.split(''' ''' ) UpperCamelCase : int = [] for token in tokens: if not len(__SCREAMING_SNAKE_CASE ): continue UpperCamelCase : Optional[int] = token.lower() UpperCamelCase : Union[str, Any] = tuple(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) UpperCamelCase : List[str] = get_pairs(__SCREAMING_SNAKE_CASE ) if not pairs: 
words.append(__SCREAMING_SNAKE_CASE ) continue while True: UpperCamelCase : List[Any] = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase : int = bigram UpperCamelCase : Optional[Any] = [] UpperCamelCase : List[Any] = 0 while i < len(__SCREAMING_SNAKE_CASE ): try: UpperCamelCase : List[Any] = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) new_word.extend(word[i:j] ) UpperCamelCase : Dict = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase : List[Any] = tuple(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = new_word if len(__SCREAMING_SNAKE_CASE ) == 1: break else: UpperCamelCase : str = get_pairs(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = '''@@ '''.join(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = word[:-4] UpperCamelCase : str = word words.append(__SCREAMING_SNAKE_CASE ) return " ".join(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : Any = re.findall(R'''\S+\n?''' , __SCREAMING_SNAKE_CASE ) for token in words: split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) ) ) return split_tokens def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = token.lower() return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = ''' '''.join(__SCREAMING_SNAKE_CASE ).replace('''@@ ''' , '''''' ).strip() return out_string def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase : str = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCamelCase : str = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '''\n''' ) UpperCamelCase : Tuple = 0 with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) UpperCamelCase : Union[str, Any] = token_index writer.write(''' '''.join(__SCREAMING_SNAKE_CASE ) + '''\n''' ) index += 1 return vocab_file, merge_file
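if __name__ == "__main__":
    # A worked illustration of the get_pairs helper at the top of this file
    # (the function obfuscated as `a`): it collects the adjacent symbol pairs
    # that the BPE loop ranks against self.bpe_ranks before merging the
    # lowest-ranked pair.
    word = ("l", "o", "w</w>")  # "</w>" is the end-of-word marker used above
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    print(sorted(pairs))  # [('l', 'o'), ('o', 'w</w>')]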
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort and return it."""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
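if __name__ == "__main__":
    # Two quick self-checks for pigeon_sort (arbitrary sample values, including
    # duplicates and negatives):
    assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]
    assert pigeon_sort([-1, 0, -3]) == [-3, -1, 0]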
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


# silence TensorFlow's C++ logging before it is imported below
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __UpperCAmelCase : List[Any] = True except ImportError: __UpperCAmelCase : List[str] = False __UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name def a ( SCREAMING_SNAKE_CASE_ : Namespace ): """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class UpperCAmelCase_ ( _a): '''simple docstring''' @staticmethod def _lowercase ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' ) add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' ) add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' ) add_new_model_parser.add_argument( '''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' ) add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = testing UpperCamelCase : Any = testing_file UpperCamelCase : Dict = path def _lowercase ( self ): """simple docstring""" warnings.warn( '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ''' '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ''' '''checks, you should use `transformers-cli add-new-model-like` instead.''' ) if not _has_cookiecutter: raise ImportError( '''Model creation dependencies are required to use the `add_new_model` command. Install them by running ''' '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]] if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError( '''Several directories starting with `cookiecutter-template-` in current working directory. 
''' '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or ''' '''change your working directory.''' ) UpperCamelCase : Dict = ( Path(__SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model''' # Execute cookiecutter if not self._testing: cookiecutter(str(__SCREAMING_SNAKE_CASE ) ) else: with open(self._testing_file , '''r''' ) as configuration_file: UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__SCREAMING_SNAKE_CASE , extra_context=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0] # Retrieve configuration with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file: UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = configuration['''lowercase_modelname'''] UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax'''] os.remove(f"""{directory}/configuration.json""" ) UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}""" os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__SCREAMING_SNAKE_CASE ) # Tests require submodules as they have parent imports with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ): pass shutil.move( f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , ) shutil.move( f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , ) def remove_copy_lines(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f: UpperCamelCase : Any = f.readlines() with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(__SCREAMING_SNAKE_CASE ) if output_pytorch: if not self._testing: remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" ) if output_tensorflow: if not self._testing: remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" ) if output_flax: if not self._testing: remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , ) shutil.move( f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # Create temp file UpperCamelCase , UpperCamelCase : Optional[Any] = mkstemp() UpperCamelCase : Tuple = False with fdopen(__SCREAMING_SNAKE_CASE , '''w''' ) as new_file: with open(__SCREAMING_SNAKE_CASE ) as old_file: for line in old_file: new_file.write(__SCREAMING_SNAKE_CASE ) if line_to_copy_below in line: UpperCamelCase : Optional[int] = True for line_to_copy in lines_to_copy: new_file.write(__SCREAMING_SNAKE_CASE ) if not line_found: raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" ) # Copy the file permissions from the old file to the new file copymode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Remove original file remove(__SCREAMING_SNAKE_CASE ) # Move new file move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def skip_units(__SCREAMING_SNAKE_CASE ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE ) as datafile: UpperCamelCase : int = [] UpperCamelCase : 
Dict = False UpperCamelCase : List[Any] = False for line in datafile: if "# To replace in: " in line and "##" not in line: UpperCamelCase : Dict = line.split('''"''' )[1] UpperCamelCase : int = skip_units(__SCREAMING_SNAKE_CASE ) elif "# Below: " in line and "##" not in line: UpperCamelCase : Dict = line.split('''"''' )[1] UpperCamelCase : List[str] = skip_units(__SCREAMING_SNAKE_CASE ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = [] elif "# Replace with" in line and "##" not in line: UpperCamelCase : Tuple = [] elif "##" not in line: lines_to_copy.append(__SCREAMING_SNAKE_CASE ) remove(__SCREAMING_SNAKE_CASE ) replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" ) os.rmdir(__SCREAMING_SNAKE_CASE )
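# --- Illustrative sketch (not part of the original file): the `replace` helper above
# uses the classic "write to a temp file, then move it over the original" pattern so a
# crash mid-write never leaves a half-edited file. A minimal standalone version, with
# hypothetical file names and marker strings:
import shutil
import tempfile
from pathlib import Path

def insert_after_marker(path, marker, new_lines):
    # Create the temp file in the same directory so the final move stays on one filesystem.
    with tempfile.NamedTemporaryFile("w", delete=False, dir=Path(path).parent) as tmp:
        with open(path) as src:
            for line in src:
                tmp.write(line)
                if marker in line:
                    tmp.writelines(new_lines)
        tmp_name = tmp.name
    shutil.copymode(path, tmp_name)  # keep the original file's permissions
    shutil.move(tmp_name, path)      # atomic on POSIX when on the same filesystem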
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=[8, 16, 32, 64] , __SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , __SCREAMING_SNAKE_CASE=[2, 3, 4] , __SCREAMING_SNAKE_CASE=1 , ): """simple docstring""" UpperCamelCase : str = parent UpperCamelCase : Dict = batch_size UpperCamelCase : Tuple = image_size UpperCamelCase : List[str] = num_channels UpperCamelCase : Union[str, Any] = embeddings_size UpperCamelCase : Dict = hidden_sizes UpperCamelCase : Any = depths UpperCamelCase : int = is_training UpperCamelCase : Dict = use_labels UpperCamelCase : Any = hidden_act UpperCamelCase : Optional[Any] = num_labels UpperCamelCase : Any = scope UpperCamelCase : int = len(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = out_features UpperCamelCase : str = out_indices UpperCamelCase : List[str] = num_groups def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : Optional[int] = None if self.use_labels: UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase : Optional[int] = self.get_config() return config, pixel_values, labels def _lowercase ( self ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[str] = BitModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = self.num_labels UpperCamelCase : Any = BitForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Union[str, Any] = BitBackbone(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase : List[str] = None UpperCamelCase : str = BitBackbone(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Any = model(__SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase : List[str] = config_and_inputs UpperCamelCase : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __UpperCamelCase : Optional[int] = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) __UpperCamelCase : int = False __UpperCamelCase : Dict = False __UpperCamelCase : List[Any] = False __UpperCamelCase : List[str] = False __UpperCamelCase : List[str] = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = BitModelTester(self ) UpperCamelCase : Tuple = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Any = model_class(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic 
UpperCamelCase : Dict = [*signature.parameters.keys()] UpperCamelCase : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : List[str] = model_class(config=__SCREAMING_SNAKE_CASE ) for name, module in model.named_modules(): if isinstance(__SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def _lowercase ( self ): """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCamelCase : Any = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase : List[str] = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : List[Any] = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase : List[Any] = layer_type UpperCamelCase : Tuple = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : int = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : str = BitModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def a ( ): """simple docstring""" UpperCamelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @cached_property def _lowercase ( self ): """simple docstring""" return ( 
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = self.default_image_processor UpperCamelCase : str = prepare_img() UpperCamelCase : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase : List[str] = model(**__SCREAMING_SNAKE_CASE ) # verify the logits UpperCamelCase : Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : List[str] = (BitBackbone,) if is_torch_available() else () __UpperCamelCase : Optional[Any] = BitConfig __UpperCamelCase : Optional[Any] = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = BitModelTester(self )
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform that maps the points `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # (the exact source/destination pairings were collapsed to one name in the dump;
    # the pairings below are a plausible reconstruction)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
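# --- Illustrative sketch (not part of the original file): cv2.getAffineTransform solves
# for the 2x3 affine matrix that maps three source points to three destination points,
# i.e. M @ [x, y, 1]^T = [x', y']^T for each pair. The same matrix can be recovered with
# plain numpy (point values are hypothetical):
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)

# Build the 3x3 system with rows [x, y, 1] and solve for the two rows of the matrix.
A = np.hstack([src, np.ones((3, 1), np.float32)])  # shape (3, 3)
M = np.linalg.solve(A, dst).T                      # shape (2, 3)
# M should match cv2.getAffineTransform(src, dst) up to floating-point error.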
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


# The original class name was obfuscated in the dump; "GLPNImageProcessor" matches the
# transformers processor this code mirrors (resize to a multiple of `size_divisor`,
# then rescale to [0, 1]).
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so that height and width are rounded down to a multiple of `size_divisor`."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
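# --- Illustrative sketch (not part of the original file): the `resize` method above
# snaps each dimension *down* to the nearest multiple of `size_divisor`, which is what
# `height // size_divisor * size_divisor` computes. Hypothetical example values:
def round_down_to_multiple(value, divisor):
    return value // divisor * divisor

assert round_down_to_multiple(480, 32) == 480  # already a multiple, unchanged
assert round_down_to_multiple(500, 32) == 480  # 500 -> 480, never rounds up
assert round_down_to_multiple(31, 32) == 0     # degenerate case for tiny inputs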
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(__magic_name__ ) # fails here def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 ) UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 ) UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
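# --- Illustrative sketch (not part of the original file): the behaviour exercised by the
# tests above can be reproduced with a minimal branch filter. The constraint keeps every
# branch whose prefix matches the tokens seen so far and completes as soon as one branch
# is fully matched. Token ids below are hypothetical; a full implementation would also
# retry a mismatching token as a fresh start instead of just resetting.
def track_disjunctive(branches, tokens):
    """Return True once any branch is fully generated, scanning `tokens` left to right."""
    prefix = []
    for tok in tokens:
        candidate = prefix + [tok]
        if any(branch[: len(candidate)] == candidate for branch in branches):
            prefix = candidate                                   # token advances a branch
            if any(branch == prefix for branch in branches):
                return True                                      # a branch completed
        else:
            prefix = []                                          # mismatch resets progress
    return False

assert track_disjunctive([[1, 2, 3], [1, 2, 4]], [1, 2, 4]) is True
assert track_disjunctive([[1, 2, 3], [1, 2, 4]], [1, 2, 5]) is False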
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, input the target column here.
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")

    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
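# --- Illustrative sketch (not part of the original file): the two append loops above
# build classic sliding windows -- `look_back` past values as input and the next
# `forward_days` values as target. A compact standalone version (the series is a
# hypothetical example):
import numpy as np

def make_windows(series, look_back, forward_days):
    x, y = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        x.append(series[i : i + look_back])                             # inputs
        y.append(series[i + look_back : i + look_back + forward_days])  # targets
    return np.array(x), np.array(y)

x, y = make_windows(np.arange(10, dtype=float), look_back=4, forward_days=2)
assert x.shape == (5, 4) and y.shape == (5, 2)  # 10 - 4 - 2 + 1 = 5 windows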
'''simple docstring''' import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig snake_case_ : Any = logging.get_logger(__name__) class __a : def __init__( self : int , __magic_name__ : str , __magic_name__ : str ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = question_encoder UpperCAmelCase_ : str = generator UpperCAmelCase_ : Dict = self.question_encoder def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Any] ) -> Optional[int]: """simple docstring""" if os.path.isfile(__magic_name__ ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) UpperCAmelCase_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' ) UpperCAmelCase_ : Optional[int] = os.path.join(__magic_name__ , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def UpperCAmelCase__ ( cls : Tuple , __magic_name__ : str , **__magic_name__ : Dict ) -> Union[str, Any]: """simple docstring""" # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer UpperCAmelCase_ : Dict = kwargs.pop('''config''' , __magic_name__ ) if config is None: UpperCAmelCase_ : Optional[Any] = RagConfig.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) UpperCAmelCase_ : int = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__( self : Union[str, Any] , *__magic_name__ : Union[str, Any] , **__magic_name__ : List[str] ) -> Any: """simple docstring""" return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] , *__magic_name__ : Optional[int] , **__magic_name__ : Dict ) -> Optional[int]: """simple docstring""" return self.generator.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] , *__magic_name__ : List[str] , **__magic_name__ : Optional[Any] ) -> Optional[int]: """simple docstring""" return self.generator.decode(*__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = self.question_encoder def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Tuple = self.generator def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] , __magic_name__ : Optional[List[str]] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : str = "longest" , __magic_name__ : str = None , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> BatchEncoding: """simple docstring""" warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. 
See the documentation of your specific tokenizer for more ''' '''details''' , __magic_name__ , ) if max_length is None: UpperCAmelCase_ : List[Any] = self.current_tokenizer.model_max_length UpperCAmelCase_ : List[Any] = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: UpperCAmelCase_ : str = self.current_tokenizer.model_max_length UpperCAmelCase_ : Dict = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) UpperCAmelCase_ : Any = labels['''input_ids'''] return model_inputs
644
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1" snake_case_ : Dict = "CompVis/stable-diffusion-v1-2" snake_case_ : Any = "CompVis/stable-diffusion-v1-3" snake_case_ : str = "CompVis/stable-diffusion-v1-4" class __a (lowerCamelCase ): def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str: """simple docstring""" super()._init_() UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Tuple = StableDiffusionPipeline( vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )} def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" self.enable_attention_slicing(__magic_name__ ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , 
callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[Any] , 
__magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(__magic_name__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase_ : int = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
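# --- Illustrative sketch (not part of the original file): a plausible way to drive the
# comparison pipeline above. The class name and the `custom_pipeline` id are hypothetical
# stand-ins (the dump obfuscates the real name); the final call runs one prompt through
# all four v1.x checkpoints and returns four images. Commented out because it downloads
# several multi-GB models.
# import torch
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4",
#     custom_pipeline="stable_diffusion_comparison",  # hypothetical community pipeline id
#     torch_dtype=torch.float16,
# )
# output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
# images = output.images  # one image per checkpoint, v1.1 through v1.4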
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via maximal matching (a factor-2 APX algorithm)."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) pairs in `graph`."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
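# --- Illustrative sketch (not part of the original file): maximal matching gives a
# 2-approximation for vertex cover -- every edge has at least one endpoint chosen, and
# the cover is at most twice the optimum. Demo on the graph from the comment above:
demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(demo_graph)
# Every edge must be covered by at least one chosen endpoint.
assert all(u in cover or v in cover for u, vs in demo_graph.items() for v in vs)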
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler snake_case_ : Optional[int] = 16 snake_case_ : Tuple = 32 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict: UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ : Tuple = datasets.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE__ : str ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' ) # Instantiate dataloaders. UpperCAmelCase_ : str = DataLoader( tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = DataLoader( tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any: model.eval() UpperCAmelCase_ : List[str] = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : List[str] = metric.compute() return eval_metric["accuracy"] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple: # Initialize accelerator UpperCAmelCase_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ : int = config['''lr'''] UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] ) UpperCAmelCase_ : Optional[int] = int(config['''seed'''] ) UpperCAmelCase_ : List[str] = int(config['''batch_size'''] ) UpperCAmelCase_ : Optional[int] = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ ) # Instantiate optimizer UpperCAmelCase_ : str = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, ) else: UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' ) UpperCAmelCase_ : Optional[Any] = num_epochs if args.partial_train_epoch is not None: UpperCAmelCase_ : List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1] UpperCAmelCase_ : int = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1 UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f: UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCAmelCase_ : int = {} for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = outputs.loss UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCAmelCase_ : Tuple = F"""epoch_{epoch}""" UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ ) accelerator.save_state(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = accuracy UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0] UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr'''] UpperCAmelCase_ : Tuple = epoch UpperCAmelCase_ : Dict = overall_step accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> List[str]: UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking 
peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, ) parser.add_argument( '''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', ) parser.add_argument( '''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', ) parser.add_argument( '''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', ) parser.add_argument( '''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', ) UpperCAmelCase_ : Optional[int] = parser.parse_args() UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
644
1
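A minimal sketch of the epoch-folder save/resume pattern the checkpointing script above builds on; the paths and prepared objects below are placeholders, not taken from the record:

# Hedged sketch of Accelerate's checkpoint round-trip; everything here is illustrative.
import os
from accelerate import Accelerator

accelerator = Accelerator()
output_dir = "."  # the script reads this from args.output_dir

# save: one state folder per epoch, e.g. output_dir/epoch_0
epoch = 0
accelerator.save_state(os.path.join(output_dir, f"epoch_{epoch}"))

# resume: reload the folder and recover the epoch index from its name
resume_from = os.path.join(output_dir, f"epoch_{epoch}")
accelerator.load_state(resume_from)
starting_epoch = int(resume_from.split("epoch_")[1]) + 1  # continue from the next epoch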
'''simple docstring''' import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase__ ( self : Optional[int] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = ort.SessionOptions() UpperCAmelCase_ : Optional[Any] = False return options def UpperCAmelCase__ ( self : int ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase_ : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase_ : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' ) # using the PNDM scheduler by default UpperCAmelCase_ : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = '''A red cat sitting on a park bench''' UpperCAmelCase_ : str = np.random.RandomState(0 ) UpperCAmelCase_ : int = pipe( prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=__magic_name__ , output_type='''np''' , ) UpperCAmelCase_ : Union[str, Any] = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1E-2
644
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]: UpperCAmelCase_ : int = [] if len(SCREAMING_SNAKE_CASE__ ) == 1: return [nums.copy()] for _ in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ : List[Any] = nums.pop(0 ) UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ ) for perm in permutations: perm.append(SCREAMING_SNAKE_CASE__ ) result.extend(SCREAMING_SNAKE_CASE__ ) nums.append(SCREAMING_SNAKE_CASE__ ) return result def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): if start == len(SCREAMING_SNAKE_CASE__ ) - 1: output.append(nums[:] ) else: for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start] backtrack(start + 1 ) UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack UpperCAmelCase_ : Optional[int] = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function snake_case_ : Tuple = permutea([1, 2, 3]) print(res) doctest.testmod()
644
1
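For reference, a self-contained, unmasked version of the swap-based permutation backtracking in the snippet above (the dump masks its names, so these identifiers are illustrative):

def permute_backtrack(nums):
    """Return all permutations of `nums` using in-place swaps."""
    output = []

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
            return
        for i in range(start, len(nums)):
            nums[i], nums[start] = nums[start], nums[i]  # choose
            backtrack(start + 1)                         # explore
            nums[i], nums[start] = nums[start], nums[i]  # undo the swap

    backtrack(0)
    return output

print(permute_backtrack([1, 2, 3]))  # 3! = 6 permutations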
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging snake_case_ : Optional[Any] = logging.get_logger(__name__) logging.set_verbosity_info() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str ) -> Tuple: if "xprophetnet" in prophetnet_checkpoint_path: UpperCAmelCase_ : str = XLMProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = XLMProphetNetForConditionalGeneration.from_pretrained( SCREAMING_SNAKE_CASE__, output_loading_info=SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase_ : Tuple = ProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained( SCREAMING_SNAKE_CASE__, output_loading_info=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = ['''key_proj''', '''value_proj''', '''query_proj'''] UpperCAmelCase_ : Dict = { '''self_attn''': '''ngram_self_attn''', '''cross_attn''': '''encoder_attn''', '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''', '''feed_forward_layer_norm''': '''final_layer_norm''', '''feed_forward''': '''''', '''intermediate''': '''fc1''', '''output''': '''fc2''', '''key_proj''': '''k_proj''', '''query_proj''': '''q_proj''', '''value_proj''': '''v_proj''', '''word_embeddings''': '''embed_tokens''', '''embeddings_layer_norm''': '''emb_layer_norm''', '''relative_pos_embeddings''': '''relative_linear''', '''ngram_embeddings''': '''ngram_input_embed''', '''position_embeddings''': '''embed_positions''', } for key in loading_info["missing_keys"]: UpperCAmelCase_ : Union[str, Any] = key.split('''.''' ) if attributes[0] == "lm_head": UpperCAmelCase_ : Tuple = prophet UpperCAmelCase_ : Optional[int] = prophet_old else: UpperCAmelCase_ : Optional[int] = prophet.prophetnet UpperCAmelCase_ : Optional[Any] = prophet_old.model UpperCAmelCase_ : Any = False for attribute in attributes: if attribute in mapping: UpperCAmelCase_ : Union[str, Any] = mapping[attribute] if not hasattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) > 0: UpperCAmelCase_ : str = attribute elif hasattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Dict = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" UpperCAmelCase_ : Union[str, Any] = old_model.weight logger.info(F"""{attribute} is initialized.""" ) UpperCAmelCase_ : Optional[int] = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
                UpperCAmelCase_ : int = old_model.bias
                logger.info(F"""{attribute} is initialized""" )
                UpperCAmelCase_ : Optional[int] = True
                break
            elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE__, '''in_proj_weight''' ):
                UpperCAmelCase_ : Union[str, Any] = old_model.in_proj_weight.shape[0] // 3
                UpperCAmelCase_ : int = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
                # these two checks were bare comparison expressions (no-ops); they need `assert`
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    UpperCAmelCase_ : List[Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    UpperCAmelCase_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    UpperCAmelCase_ : str = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    UpperCAmelCase_ : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    UpperCAmelCase_ : Any = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    UpperCAmelCase_ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                UpperCAmelCase_ : Union[str, Any] = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                UpperCAmelCase_ : List[str] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                UpperCAmelCase_ : Optional[int] = True
                break

            if attribute.isdigit():
                UpperCAmelCase_ : Any = model[int(SCREAMING_SNAKE_CASE__ )]
                UpperCAmelCase_ : Optional[Any] = old_model[int(SCREAMING_SNAKE_CASE__ )]
            else:
                UpperCAmelCase_ : Tuple = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
                if old_attribute == "":
                    UpperCAmelCase_ : Dict = old_model
                else:
                    if not hasattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
                        raise ValueError(F"""{old_model} does not have {old_attribute}""" )
                    UpperCAmelCase_ : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )

        if not is_key_init:
            raise ValueError(F"""{key} was not correctly initialized!""" )

    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    snake_case_ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    snake_case_ : Union[str, Any] = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
644
'''simple docstring''' class __a : def __init__( self : List[Any] , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : Optional[Any] = size UpperCAmelCase_ : Tuple = [0] * size UpperCAmelCase_ : Optional[Any] = [0] * size @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return index | (index + 1) @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : int = value while index < self.size: UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1 if current_left_border == index: UpperCAmelCase_ : List[str] = value else: UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ ) def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" right -= 1 # Because of right is exclusive UpperCAmelCase_ : List[str] = 0 while left <= right: UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ ) if left <= current_left: UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] ) UpperCAmelCase_ : Optional[Any] = current_left else: UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
644
1
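The bit tricks behind the range-max structure above, pulled out as standalone helpers so their behavior is visible (the free-function names are illustrative):

# get_next(i) = i | (i + 1): the next tree node whose interval covers index i.
# get_prev(i) = (i & (i + 1)) - 1: the end of the preceding covered block.
def get_next(index: int) -> int:
    return index | (index + 1)

def get_prev(index: int) -> int:
    return (index & (index + 1)) - 1

print([get_next(i) for i in range(8)])  # [1, 3, 3, 7, 5, 7, 7, 15]
print([get_prev(i) for i in range(8)])  # [-1, -1, 1, -1, 3, 3, 5, -1]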
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case_ : str = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class __a (lowerCamelCase , unittest.TestCase ): __a : List[str] = XGLMTokenizer __a : Optional[int] = XGLMTokenizerFast __a : List[Any] = True __a : Union[str, Any] = True def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : int = XGLMTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = '''<pad>''' UpperCAmelCase_ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(len(__magic_name__ ) , 10_08 ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_08 ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = XGLMTokenizer(__magic_name__ , keep_accents=__magic_name__ ) UpperCAmelCase_ : Tuple = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__magic_name__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) UpperCAmelCase_ : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def UpperCAmelCase__ ( self : List[str] ) -> str: """simple 
docstring""" return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) def UpperCAmelCase__ ( self : Dict ) -> Any: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__magic_name__ , f.name ) UpperCAmelCase_ : Optional[Any] = XGLMTokenizer(f.name , keep_accents=__magic_name__ ) UpperCAmelCase_ : int = pickle.dumps(__magic_name__ ) pickle.loads(__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" if not self.test_rust_tokenizer: return UpperCAmelCase_ : str = self.get_tokenizer() UpperCAmelCase_ : int = self.get_rust_tokenizer() UpperCAmelCase_ : Tuple = '''I was born in 92000, and this is falsé.''' UpperCAmelCase_ : List[str] = tokenizer.tokenize(__magic_name__ ) UpperCAmelCase_ : Any = rust_tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer() UpperCAmelCase_ : str = tokenizer.encode(__magic_name__ ) UpperCAmelCase_ : Tuple = rust_tokenizer.encode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) @slow def UpperCAmelCase__ ( self : Optional[int] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Optional[int] = '''Hello World!''' UpperCAmelCase_ : str = [2, 3_12_27, 44_47, 35] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCAmelCase__ ( self : int ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth''' ) # fmt: off UpperCAmelCase_ : Optional[int] = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35] # fmt: on self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCAmelCase__ ( self : str ) -> int: """simple docstring""" # fmt: off UpperCAmelCase_ : Any = { '''input_ids''': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name='''facebook/xglm-564M''' , padding=__magic_name__ , )
644
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str: """simple docstring""" UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Any = use_input_mask UpperCAmelCase_ : List[str] = use_token_type_ids UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : Tuple = scope def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None if self.use_token_type_ids: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: 
"""simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # create attention mask UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) UpperCAmelCase_ : Any = self.seq_length // 2 UpperCAmelCase_ : Tuple = 0 # first forward pass UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1 UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCAmelCase_ : str = random_other_next_tokens # append to next input_ids and attn_mask UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : int = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , ) # get two different outputs UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] # select random slice UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , 
output_from_past.shape[-1] ).item() UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach() UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval() UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) # first forward pass UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[ '''last_hidden_state''' ] # select random slice UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ ) model.to(__magic_name__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str: """simple docstring""" UpperCAmelCase_ : int = BioGptModel(__magic_name__ ) UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 ) def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]: 
"""simple docstring""" UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : str = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else () __a : Union[str, Any] = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) __a : List[str] = False def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = BioGptModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : str = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ ) @slow def UpperCAmelCase__ ( 
self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : Tuple = '''left''' # Define PAD Token = EOS Token = 50256 UpperCAmelCase_ : List[Any] = tokenizer.eos_token UpperCAmelCase_ : List[Any] = model.config.eos_token_id # use different length sentences to test batching UpperCAmelCase_ : Tuple = [ '''Hello, my dog is a little''', '''Today, I''', ] UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ ) UpperCAmelCase_ : Any = model.generate( input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , ) UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ ) UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings ) UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : Tuple = input_dict['''input_ids'''] UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = 3 UpperCAmelCase_ : Optional[int] = '''multi_label_classification''' UpperCAmelCase_ : int = input_dict['''input_ids'''] UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCAmelCase_ : str = model(__magic_name__ )[0] UpperCAmelCase_ : Optional[int] = 4_23_84 UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __magic_name__ ) UpperCAmelCase_ : List[Any] = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ ) UpperCAmelCase_ : Optional[int] = model.generate( **__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , ) UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(__magic_name__ , __magic_name__ )
644
1
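A condensed sketch of the left-padded batched generation the BioGPT test above exercises; it loads the same public checkpoint as the test, so running it downloads the model:

from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

tokenizer.padding_side = "left"            # decoder-only models must pad on the left
tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the PAD token
model.config.pad_token_id = model.config.eos_token_id

batch = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
outputs = model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))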
'''simple docstring''' from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __a (lowerCamelCase ): __a : "DiagonalGaussianDistribution" class __a (lowerCamelCase , lowerCamelCase ): __a : str = True @register_to_config def __init__( self : str , __magic_name__ : int = 3 , __magic_name__ : int = 3 , __magic_name__ : Tuple[str] = ("DownEncoderBlock2D",) , __magic_name__ : Tuple[str] = ("UpDecoderBlock2D",) , __magic_name__ : Tuple[int] = (64,) , __magic_name__ : int = 1 , __magic_name__ : str = "silu" , __magic_name__ : int = 4 , __magic_name__ : int = 32 , __magic_name__ : int = 32 , __magic_name__ : float = 0.1_8_2_1_5 , ) -> int: """simple docstring""" super().__init__() # pass init params to Encoder UpperCAmelCase_ : Union[str, Any] = Encoder( in_channels=__magic_name__ , out_channels=__magic_name__ , down_block_types=__magic_name__ , block_out_channels=__magic_name__ , layers_per_block=__magic_name__ , act_fn=__magic_name__ , norm_num_groups=__magic_name__ , double_z=__magic_name__ , ) # pass init params to Decoder UpperCAmelCase_ : Dict = Decoder( in_channels=__magic_name__ , out_channels=__magic_name__ , up_block_types=__magic_name__ , block_out_channels=__magic_name__ , layers_per_block=__magic_name__ , norm_num_groups=__magic_name__ , act_fn=__magic_name__ , ) UpperCAmelCase_ : str = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) UpperCAmelCase_ : Optional[Any] = nn.Convad(__magic_name__ , __magic_name__ , 1 ) UpperCAmelCase_ : str = False UpperCAmelCase_ : List[str] = False # only relevant if vae tiling is enabled UpperCAmelCase_ : Dict = self.config.sample_size UpperCAmelCase_ : List[Any] = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) UpperCAmelCase_ : Dict = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) UpperCAmelCase_ : str = 0.2_5 def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any]=False ) -> Dict: """simple docstring""" if isinstance(__magic_name__ , (Encoder, Decoder) ): UpperCAmelCase_ : List[str] = value def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : bool = True ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[int] = use_tiling def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" self.enable_tiling(__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = True def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict[str, AttentionProcessor]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} def fn_recursive_add_processors(__magic_name__ : str , __magic_name__ : torch.nn.Module , __magic_name__ : Dict[str, AttentionProcessor] ): if hasattr(__magic_name__ , '''set_processor''' ): UpperCAmelCase_ : Any = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , 
__magic_name__ , __magic_name__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__magic_name__ , __magic_name__ , __magic_name__ ) return processors def UpperCAmelCase__ ( self : int , __magic_name__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = len(self.attn_processors.keys() ) if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) != count: raise ValueError( F"""A dict of processors was passed, but the number of processors {len(__magic_name__ )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__magic_name__ : str , __magic_name__ : torch.nn.Module , __magic_name__ : List[str] ): if hasattr(__magic_name__ , '''set_processor''' ): if not isinstance(__magic_name__ , __magic_name__ ): module.set_processor(__magic_name__ ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __magic_name__ , __magic_name__ ) for name, module in self.named_children(): fn_recursive_attn_processor(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> AutoencoderKLOutput: """simple docstring""" if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__magic_name__ , return_dict=__magic_name__ ) if self.use_slicing and x.shape[0] > 1: UpperCAmelCase_ : Optional[int] = [self.encoder(__magic_name__ ) for x_slice in x.split(1 )] UpperCAmelCase_ : Union[str, Any] = torch.cat(__magic_name__ ) else: UpperCAmelCase_ : List[str] = self.encoder(__magic_name__ ) UpperCAmelCase_ : Dict = self.quant_conv(__magic_name__ ) UpperCAmelCase_ : Any = DiagonalGaussianDistribution(__magic_name__ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__magic_name__ , return_dict=__magic_name__ ) UpperCAmelCase_ : Tuple = self.post_quant_conv(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = self.decoder(__magic_name__ ) if not return_dict: return (dec,) return DecoderOutput(sample=__magic_name__ ) @apply_forward_hook def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if self.use_slicing and z.shape[0] > 1: UpperCAmelCase_ : Union[str, Any] = [self._decode(__magic_name__ ).sample for z_slice in z.split(1 )] UpperCAmelCase_ : Any = torch.cat(__magic_name__ ) else: UpperCAmelCase_ : Dict = self._decode(__magic_name__ ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] ) -> List[Any]: """simple docstring""" 
UpperCAmelCase_ : int = min(a.shape[2] , b.shape[2] , __magic_name__ ) for y in range(__magic_name__ ): UpperCAmelCase_ : Optional[int] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def UpperCAmelCase__ ( self : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : List[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , __magic_name__ ) for x in range(__magic_name__ ): UpperCAmelCase_ : Optional[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def UpperCAmelCase__ ( self : str , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> AutoencoderKLOutput: """simple docstring""" UpperCAmelCase_ : List[Any] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Union[str, Any] = int(self.tile_latent_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : List[Any] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. UpperCAmelCase_ : Any = [] for i in range(0 , x.shape[2] , __magic_name__ ): UpperCAmelCase_ : int = [] for j in range(0 , x.shape[3] , __magic_name__ ): UpperCAmelCase_ : str = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] UpperCAmelCase_ : str = self.encoder(__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = self.quant_conv(__magic_name__ ) row.append(__magic_name__ ) rows.append(__magic_name__ ) UpperCAmelCase_ : List[Any] = [] for i, row in enumerate(__magic_name__ ): UpperCAmelCase_ : Tuple = [] for j, tile in enumerate(__magic_name__ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Optional[Any] = self.blend_v(rows[i - 1][j] , __magic_name__ , __magic_name__ ) if j > 0: UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , __magic_name__ , __magic_name__ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__magic_name__ , dim=3 ) ) UpperCAmelCase_ : List[str] = torch.cat(__magic_name__ , dim=2 ) UpperCAmelCase_ : List[str] = DiagonalGaussianDistribution(__magic_name__ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__magic_name__ ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" UpperCAmelCase_ : Dict = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Union[str, Any] = int(self.tile_sample_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
UpperCAmelCase_ : Union[str, Any] = [] for i in range(0 , z.shape[2] , __magic_name__ ): UpperCAmelCase_ : int = [] for j in range(0 , z.shape[3] , __magic_name__ ): UpperCAmelCase_ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] UpperCAmelCase_ : Union[str, Any] = self.post_quant_conv(__magic_name__ ) UpperCAmelCase_ : Any = self.decoder(__magic_name__ ) row.append(__magic_name__ ) rows.append(__magic_name__ ) UpperCAmelCase_ : Optional[int] = [] for i, row in enumerate(__magic_name__ ): UpperCAmelCase_ : int = [] for j, tile in enumerate(__magic_name__ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Tuple = self.blend_v(rows[i - 1][j] , __magic_name__ , __magic_name__ ) if j > 0: UpperCAmelCase_ : str = self.blend_h(row[j - 1] , __magic_name__ , __magic_name__ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__magic_name__ , dim=3 ) ) UpperCAmelCase_ : List[Any] = torch.cat(__magic_name__ , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__magic_name__ ) def UpperCAmelCase__ ( self : int , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = sample UpperCAmelCase_ : Any = self.encode(__magic_name__ ).latent_dist if sample_posterior: UpperCAmelCase_ : str = posterior.sample(generator=__magic_name__ ) else: UpperCAmelCase_ : Tuple = posterior.mode() UpperCAmelCase_ : str = self.decode(__magic_name__ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__magic_name__ )
644
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __a (lowerCamelCase , unittest.TestCase ): __a : List[str] = BlenderbotSmallTokenizer __a : List[Any] = False def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" super().setUp() UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = '''adapt act apte''' UpperCAmelCase_ : Tuple = '''adapt act apte''' return input_text, output_text def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ : List[Any] = '''adapt act apte''' UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te'''] UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] UpperCAmelCase_ : Optional[int] = '''I am a small frog.''' UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) UpperCAmelCase_ : List[Any] = '''I am a small frog .''' UpperCAmelCase_ : Any = '''.''' UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
644
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() snake_case_ : List[Any] = logging.get_logger(__name__) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict ) -> Any: UpperCAmelCase_ : Optional[int] = SwinConfig( embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['''stage2''', '''stage3''', '''stage4'''], ) UpperCAmelCase_ : Optional[int] = DetaConfig( backbone_config=SCREAMING_SNAKE_CASE__, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=SCREAMING_SNAKE_CASE__, with_box_refine=SCREAMING_SNAKE_CASE__, two_stage=SCREAMING_SNAKE_CASE__, ) # set labels UpperCAmelCase_ : Any = '''huggingface/label-files''' if "o365" in model_name: UpperCAmelCase_ : List[Any] = 366 UpperCAmelCase_ : Any = '''object365-id2label.json''' else: UpperCAmelCase_ : List[Any] = 91 UpperCAmelCase_ : List[str] = '''coco-detection-id2label.json''' UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : Optional[Any] = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, repo_type='''dataset''' ) ), '''r''' ) ) UpperCAmelCase_ : int = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} UpperCAmelCase_ : Any = idalabel UpperCAmelCase_ : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]: UpperCAmelCase_ : List[Any] = [] # stem # fmt: off rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", 
F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') ) rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') ) rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') ) rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') ) rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') ) rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") ) 
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") ) 
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") ) # fmt: on return rename_keys def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : int ) -> Dict: UpperCAmelCase_ : str = dct.pop(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = val def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]: UpperCAmelCase_ : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCAmelCase_ : str = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCAmelCase_ : Union[str, Any] = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" ) UpperCAmelCase_ : str = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Optional[int] = in_proj_weight[:dim, :] UpperCAmelCase_ : List[str] = in_proj_bias[: dim] UpperCAmelCase_ : int = in_proj_weight[ dim : dim * 2, : ] UpperCAmelCase_ : Optional[Any] = in_proj_bias[ dim : dim * 2 ] UpperCAmelCase_ : List[str] = in_proj_weight[ -dim :, : ] UpperCAmelCase_ : Dict = in_proj_bias[-dim :] # fmt: on def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any: # transformer decoder self-attention layers UpperCAmelCase_ : List[Any] = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention UpperCAmelCase_ : List[str] = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ : List[str] = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : str = in_proj_weight[:hidden_size, :] UpperCAmelCase_ : Tuple = in_proj_bias[:hidden_size] UpperCAmelCase_ : Any = in_proj_weight[ hidden_size : hidden_size * 2, : ] UpperCAmelCase_ : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2] UpperCAmelCase_ : Optional[Any] = in_proj_weight[-hidden_size:, :] UpperCAmelCase_ : List[Any] = in_proj_bias[-hidden_size:] def lowerCamelCase_ ( ) -> Any: UpperCAmelCase_ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase_ : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE__, stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any: UpperCAmelCase_ : List[str] = get_deta_config(SCREAMING_SNAKE_CASE__ ) # load original state dict if model_name == "deta-swin-large": UpperCAmelCase_ : int = hf_hub_download(repo_id='''nielsr/deta-checkpoints''', filename='''adet_swin_ft.pth''' ) elif model_name == "deta-swin-large-o365": UpperCAmelCase_ : Dict = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''', filename='''deta_swin_pt_o365.pth''' ) else: raise ValueError(F"""Model name {model_name} not supported""" ) UpperCAmelCase_ : Any = torch.load(SCREAMING_SNAKE_CASE__, map_location='''cpu''' )['''model'''] # 
original state dict for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE__, param.shape ) # rename keys UpperCAmelCase_ : Dict = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) read_in_swin_q_k_v(SCREAMING_SNAKE_CASE__, config.backbone_config ) read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: UpperCAmelCase_ : Union[str, Any] = state_dict.pop(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = val if "input_proj" in key: UpperCAmelCase_ : List[str] = state_dict.pop(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: UpperCAmelCase_ : Union[str, Any] = state_dict.pop(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[str] = val # finally, create HuggingFace model and load state dict UpperCAmelCase_ : int = DetaForObjectDetection(SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) model.eval() UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu''' model.to(SCREAMING_SNAKE_CASE__ ) # load image processor UpperCAmelCase_ : Union[str, Any] = DetaImageProcessor(format='''coco_detection''' ) # verify our conversion on image UpperCAmelCase_ : str = prepare_img() UpperCAmelCase_ : str = processor(images=SCREAMING_SNAKE_CASE__, return_tensors='''pt''' ) UpperCAmelCase_ : Union[str, Any] = encoding['''pixel_values'''] UpperCAmelCase_ : str = model(pixel_values.to(SCREAMING_SNAKE_CASE__ ) ) # verify logits print('''Logits:''', outputs.logits[0, :3, :3] ) print('''Boxes:''', outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": UpperCAmelCase_ : List[str] = torch.tensor( [[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] ) UpperCAmelCase_ : int = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] ) elif model_name == "deta-swin-large-o365": UpperCAmelCase_ : Optional[int] = torch.tensor( [[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] ) UpperCAmelCase_ : Optional[int] = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] ) assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(SCREAMING_SNAKE_CASE__ ), atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(SCREAMING_SNAKE_CASE__ ), atol=1E-4 ) print('''Everything ok!''' ) if pytorch_dump_folder_path: # Save model and processor logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Push to hub if push_to_hub: print('''Pushing model and processor to hub...''' ) model.push_to_hub(F"""jozhang97/{model_name}""" ) processor.push_to_hub(F"""jozhang97/{model_name}""" ) if __name__ == "__main__": snake_case_ : Any = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="deta-swin-large", choices=["deta-swin-large", "deta-swin-large-o365"], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path 
to the folder to output PyTorch model.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) snake_case_ : List[str] = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
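Once converted and pushed, the checkpoint behaves like any detection model. A short inference sketch reusing the repo id from the push_to_hub call and the same COCO test image; the 0.5 threshold is an arbitrary choice:

import requests
import torch
from PIL import Image
from transformers import DetaForObjectDetection, DetaImageProcessor

processor = DetaImageProcessor.from_pretrained("jozhang97/deta-swin-large")
model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Map raw logits/boxes back to pixel space and keep confident detections.
results = processor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=[image.size[::-1]]
)[0]
print(results["scores"], results["labels"])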
644
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = get_activation('''swish''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[int] = get_activation('''mish''' ) self.assertIsInstance(__magic_name__ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = get_activation('''gelu''' ) self.assertIsInstance(__magic_name__ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
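Stripped of the test scaffolding, the factory under test is a one-line lookup from a string to an nn.Module; printed values are approximate:

import torch
from diffusers.models.activations import get_activation

act = get_activation("silu")                   # returns an nn.SiLU instance
print(act(torch.tensor([-100.0, 0.0, 20.0])))  # ≈ tensor([ 0.,  0., 20.])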
644
1
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() snake_case_ : str = logging.get_logger(__name__) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Dict = downstream_dict['''projector.weight'''] UpperCAmelCase_ : int = downstream_dict['''projector.bias'''] UpperCAmelCase_ : List[Any] = downstream_dict['''model.post_net.linear.weight'''] UpperCAmelCase_ : Any = downstream_dict['''model.post_net.linear.bias'''] return model def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Dict = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = downstream_dict['''model.linear.weight'''] UpperCAmelCase_ : Optional[int] = downstream_dict['''model.linear.bias'''] return model def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : int ) -> Any: UpperCAmelCase_ : int = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = downstream_dict['''connector.weight'''] UpperCAmelCase_ : Any = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): UpperCAmelCase_ : List[Any] = downstream_dict[ F"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] UpperCAmelCase_ : Optional[Any] = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] UpperCAmelCase_ : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] UpperCAmelCase_ : int = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] UpperCAmelCase_ : Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] UpperCAmelCase_ : Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] UpperCAmelCase_ : Optional[Any] = downstream_dict['''objective.W'''] return model @torch.no_grad() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__, map_location='''cpu''' ) UpperCAmelCase_ : List[str] = checkpoint['''Downstream'''] UpperCAmelCase_ : List[str] = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = WavaVecaFeatureExtractor.from_pretrained( SCREAMING_SNAKE_CASE__, return_attention_mask=SCREAMING_SNAKE_CASE__, do_normalize=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): UpperCAmelCase_ : List[Any] = convert_classification(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) elif arch.endswith('''ForAudioFrameClassification''' ): UpperCAmelCase_ : int = convert_diarization(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) elif arch.endswith('''ForXVector''' ): 
UpperCAmelCase_ : Tuple = convert_xvector(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else: raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: UpperCAmelCase_ : str = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": snake_case_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") snake_case_ : Any = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
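After a successful run, the dump directory is an ordinary Transformers checkpoint, so reloading it is the quickest smoke test; the local path below is a placeholder:

from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification

model = Wav2Vec2ForSequenceClassification.from_pretrained("./model_dump")
extractor = Wav2Vec2FeatureExtractor.from_pretrained("./model_dump")
print(type(model).__name__)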
644
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging snake_case_ : Union[str, Any] = logging.get_logger(__name__) class __a (lowerCamelCase ): __a : Tuple = ["pixel_values"] def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None: """simple docstring""" UpperCAmelCase_ : int = do_resize UpperCAmelCase_ : Tuple = do_rescale UpperCAmelCase_ : List[Any] = size_divisor UpperCAmelCase_ : Any = resample super().__init__(**__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ ) # Rounds the height and width down to the closest multiple of size_divisor UpperCAmelCase_ : Dict = height // size_divisor * size_divisor UpperCAmelCase_ : Dict = width // size_divisor * size_divisor UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) return image def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray: """simple docstring""" return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature: """simple docstring""" UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor UpperCAmelCase_ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ ) if not valid_images(__magic_name__ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images] if do_resize: UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images] if do_rescale: UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images] UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images] UpperCAmelCase_ : int = {'''pixel_values''': images} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
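The resize rule above just floors each side down to the nearest multiple of size_divisor before the 1/255 rescale; the arithmetic in isolation:

size_divisor = 32
height, width = 480, 513
new_h = height // size_divisor * size_divisor  # 480 (already a multiple of 32)
new_w = width // size_divisor * size_divisor   # 512 (513 floored to 16 * 32)
print(new_h, new_w)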
644
1
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record snake_case_ : Union[str, Any] = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n" snake_case_ : str = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n" snake_case_ : List[str] = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n 
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]: return float((preds == labels).mean() ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Union[str, Any]="binary" ) -> Dict: UpperCAmelCase_ : Union[str, Any] = simple_accuracy(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = float(fa_score(y_true=SCREAMING_SNAKE_CASE__, y_pred=SCREAMING_SNAKE_CASE__, average=SCREAMING_SNAKE_CASE__ ) ) return { "accuracy": acc, "f1": fa, } def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]: UpperCAmelCase_ : int = {} for id_pred, label in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : List[Any] = F"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}""" UpperCAmelCase_ : Tuple = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: UpperCAmelCase_ : Dict = [(pred, label)] UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = [], [] for question, preds_labels in question_map.items(): UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = zip(*SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = fa_score(y_true=SCREAMING_SNAKE_CASE__, y_pred=SCREAMING_SNAKE_CASE__, average='''macro''' ) fas.append(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = int(sum(pred == label for pred, label in preds_labels ) == len(SCREAMING_SNAKE_CASE__ ) ) ems.append(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = float(sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase_ : Dict = sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Dict = float(fa_score(y_true=SCREAMING_SNAKE_CASE__, y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a (datasets.Metric ): def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return 
{ "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { "predictions": datasets.Value('''int64''' ), "references": datasets.Value('''int64''' ), } def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : Any ) -> Tuple: """simple docstring""" if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(__magic_name__ , __magic_name__ )} elif self.config_name == "cb": return acc_and_fa(__magic_name__ , __magic_name__ , fa_avg='''macro''' ) elif self.config_name == "record": UpperCAmelCase_ : List[Any] = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]} for ref in references ] } ] UpperCAmelCase_ : Dict = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(__magic_name__ , __magic_name__ )[0] elif self.config_name == "multirc": return evaluate_multirc(__magic_name__ , __magic_name__ ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__magic_name__ , __magic_name__ )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
644
'''simple docstring''' def solution( max_base: int = 10, max_power: int = 22 ) -> int: bases = range(1, max_base ) powers = range(1, max_power ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(10, 22) = }''')
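The 10/22 defaults work because base 9 is the largest base whose nth power can still have n digits, and 9**21 is the last such power; a one-line sanity check:

print(len(str(9**21)))  # 21 -> a 21-digit 21st power; 9**22 falls one digit short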
644
1
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : int = "▁" snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} snake_case_ : int = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } snake_case_ : Optional[Any] = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } snake_case_ : Dict = { "ernie-m-base": 5_14, "ernie-m-large": 5_14, } snake_case_ : Any = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class __a (lowerCamelCase ): __a : List[str] = ["input_ids"] __a : Union[str, Any] = VOCAB_FILES_NAMES __a : Tuple = PRETRAINED_INIT_CONFIGURATION __a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __a : Union[str, Any] = RESOURCE_FILES_NAMES def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) UpperCAmelCase_ : Optional[Any] = do_lower_case UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ ) else: UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )} UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()} def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any: """simple docstring""" if text is None: return None UpperCAmelCase_ : str = self.tokenize(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', [] for i, ch in enumerate(__magic_name__ ): if ch in self.SP_CHAR_MAPPING: UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ ) else: UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ ) if self.is_whitespace(__magic_name__ ): continue normalized_text += ch char_mapping.extend([i] * len(__magic_name__ ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0 if self.do_lower_case: UpperCAmelCase_ : Optional[int] = text.lower() for token in split_tokens: if token[:1] == "▁": UpperCAmelCase_ : Tuple = token[1:] UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) UpperCAmelCase_ : int = end return token_mapping @property def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" return len(self.vocab ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.__dict__.copy() UpperCAmelCase_ : Optional[Any] = None return state def __setstate__( self : str , __magic_name__ : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]: """simple docstring""" if self.sp_model_kwargs.get('''enable_sampling''' ) is True: UpperCAmelCase_ : Dict = True if self.sp_model_kwargs.get('''alpha''' ) is not None: UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' 
) if not enable_sampling: UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ ) else: UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : List[Any] = [] for pi, piece in enumerate(__magic_name__ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0: new_pieces.append(__magic_name__ ) continue else: continue UpperCAmelCase_ : List[str] = 0 for i, chunk in enumerate(__magic_name__ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__magic_name__ ) UpperCAmelCase_ : List[Any] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : List[str] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : str = i if len(__magic_name__ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]: """simple docstring""" return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.reverse_vocab.get(__magic_name__ , self.unk_token ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id] UpperCAmelCase_ : List[Any] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ 
: Optional[List[int]] = None ) -> List[int]: """simple docstring""" # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(__magic_name__ ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3) def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple: """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str: """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__magic_name__ ) == 1: UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ ) if cat == "Zs": return True return False def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(__magic_name__ ): UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' ) UpperCAmelCase_ : Dict = int(__magic_name__ ) return token_to_idx def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 0 if os.path.isdir(__magic_name__ ): UpperCAmelCase_ : Any = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) UpperCAmelCase_ : Dict = token_index writer.write(token + '''\n''' ) index += 1 UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' ) with open(__magic_name__ , '''wb''' ) as fi: UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (vocab_file,)
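A rough usage sketch of the tokenizer above; the hub id is inferred from the pretrained map near the top of the file, and the exact ids are model-specific:

from transformers import ErnieMTokenizer

tok = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
enc = tok("ERNIE-M handles many languages.")
print(enc.input_ids)  # [CLS] ... [SEP], per the special-token logic above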
644
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class __a (lowerCamelCase ): __a : int = "dandelin/vilt-b32-finetuned-vqa" __a : Any = ( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." ) __a : Any = "image_qa" __a : str = AutoProcessor __a : Any = AutoModelForVisualQuestionAnswering __a : List[Any] = ["image", "text"] __a : int = ["text"] def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple: """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple: """simple docstring""" return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" with torch.no_grad(): return self.model(**__magic_name__ ).logits def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
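In stock Transformers this class backs the image_qa agent tool; whether load_tool resolves it under the task string below is an assumption worth verifying, and the image path is a placeholder:

from PIL import Image
from transformers import load_tool

tool = load_tool("image-question-answering")  # task string assumed
print(tool(Image.open("photo.jpg"), "How many cats are there?"))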
644
1
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
644
'''simple docstring''' from collections.abc import Iterable from typing import Any class __a : def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = value UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def __repr__( self : List[str] ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class __a : def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = root def __str__( self : Any ) -> str: """simple docstring""" return str(self.root ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids UpperCAmelCase_ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(__magic_name__ ): # If it is the right children UpperCAmelCase_ : Optional[Any] = new_children else: UpperCAmelCase_ : Optional[int] = new_children else: UpperCAmelCase_ : List[str] = new_children def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.root is None def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None: """simple docstring""" UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase_ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase_ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase_ : List[Any] = parent_node.left else: if parent_node.right is None: UpperCAmelCase_ : List[Any] = new_node break else: UpperCAmelCase_ : Union[str, Any] = parent_node.right UpperCAmelCase_ : Union[str, Any] = parent_node def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None: """simple docstring""" for value in values: self.__insert(__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase_ : str = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right return node def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None UpperCAmelCase_ : Dict = self.root if not self.empty(): while node.right is not None: UpperCAmelCase_ : Any = node.right return node def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: UpperCAmelCase_ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase_ : Union[str, Any] = self.root while node.left is not None: UpperCAmelCase_ : Dict = node.left return node def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__magic_name__ , __magic_name__ ) elif node.left is None: # Has only right children self.__reassign_nodes(__magic_name__ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__magic_name__ , node.left ) else: UpperCAmelCase_ : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase_ : Optional[int] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None: """simple docstring""" if node: self.inorder(__magic_name__ , node.left ) arr.append(node.value ) self.inorder(__magic_name__ , node.right ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int: """simple docstring""" UpperCAmelCase_ : list[int] = [] self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal return arr[k - 1] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]: UpperCAmelCase_ : Any = [] if curr_node is not None: UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def lowerCamelCase_ ( ) -> None: UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase_ : Tuple = BinarySearchTree() for i in testlist: t.insert(SCREAMING_SNAKE_CASE__ ) # Prints all the elements of the list in order traversal print(SCREAMING_SNAKE_CASE__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''', t.get_max().value ) # type: ignore 
print('''Min Value: ''', t.get_min().value ) # type: ignore for i in testlist: t.remove(SCREAMING_SNAKE_CASE__ ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
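The removal method in the tree code above handles a node with two children by copying in the maximum of its left subtree (the in-order predecessor) and then deleting that predecessor, which preserves the search-tree ordering. A minimal standalone sketch of that strategy; the `Node` and `delete` names here are hypothetical and not taken from the snippet:

class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None


def delete(root, value):
    # Standard BST deletion: recurse to the target, then splice it out.
    if root is None:
        return None
    if value < root.value:
        root.left = delete(root.left, value)
    elif value > root.value:
        root.right = delete(root.right, value)
    else:
        if root.left is None:  # zero or one child: promote the other side
            return root.right
        if root.right is None:
            return root.left
        # Two children: copy the in-order predecessor (max of the left
        # subtree) into this node, then delete it from the left subtree.
        pred = root.left
        while pred.right is not None:
            pred = pred.right
        root.value = pred.value
        root.left = delete(root.left, pred.value)
    return root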
644
1
'''simple docstring''' import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dataset, SCREAMING_SNAKE_CASE__ : Dict[str, str] ) -> Optional[Any]: UpperCAmelCase_ : Dict = args.log_outputs UpperCAmelCase_ : Optional[int] = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric UpperCAmelCase_ : List[str] = load_metric('''wer''' ) UpperCAmelCase_ : Union[str, Any] = load_metric('''cer''' ) # compute metrics UpperCAmelCase_ : Any = wer.compute(references=result['''target'''], predictions=result['''prediction'''] ) UpperCAmelCase_ : Dict = cer.compute(references=result['''target'''], predictions=result['''prediction'''] ) # print & log results UpperCAmelCase_ : str = F"""WER: {wer_result}\nCER: {cer_result}""" print(SCREAMING_SNAKE_CASE__ ) with open(F"""{dataset_id}_eval_results.txt""", '''w''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase_ : List[Any] = F"""log_{dataset_id}_predictions.txt""" UpperCAmelCase_ : Optional[int] = F"""log_{dataset_id}_targets.txt""" with open(SCREAMING_SNAKE_CASE__, '''w''' ) as p, open(SCREAMING_SNAKE_CASE__, '''w''' ) as t: # mapping function to write output def write_to_file(SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Tuple ): p.write(F"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(F"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(SCREAMING_SNAKE_CASE__, with_indices=SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> str: UpperCAmelCase_ : str = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase_ : Union[str, Any] = re.sub(SCREAMING_SNAKE_CASE__, '''''', text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase_ : Union[str, Any] = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: UpperCAmelCase_ : Optional[Any] = ''' '''.join(text.split(SCREAMING_SNAKE_CASE__ ) ) return text def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: # load dataset UpperCAmelCase_ : int = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=SCREAMING_SNAKE_CASE__ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase_ : Tuple = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase_ : Any = feature_extractor.sampling_rate # resample audio UpperCAmelCase_ : List[str] = dataset.cast_column('''audio''', Audio(sampling_rate=SCREAMING_SNAKE_CASE__ ) ) # load eval pipeline if args.device is None: UpperCAmelCase_ : int = 0 if torch.cuda.is_available() else -1 UpperCAmelCase_ : List[Any] = pipeline('''automatic-speech-recognition''', model=args.model_id, device=args.device ) # map function to decode audio def map_to_pred(SCREAMING_SNAKE_CASE__ : List[Any] ): UpperCAmelCase_ : List[str] = asr( batch['''audio''']['''array'''], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s ) UpperCAmelCase_ : Optional[Any] = prediction['''text'''] UpperCAmelCase_ : Tuple = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples UpperCAmelCase_ : List[str] = dataset.map(SCREAMING_SNAKE_CASE__, remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": snake_case_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) snake_case_ : Any = parser.parse_args() main(args)
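The normalization loop above stresses that the order of the token sequences matters: collapsing '\n' before '\n\n' would turn every double newline into two spaces instead of matching it as one separator. A tiny standalone illustration of the same pass (the helper name is hypothetical, not part of the eval script):

def collapse_separators(text: str) -> str:
    # Longest separators first, mirroring the order used in the script.
    for token in ["\n\n", "\n", "  "]:
        text = " ".join(text.split(token))
    return text

print(collapse_separators("line one\n\nline two\nline three"))
# -> "line one line two line three"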
644
'''simple docstring'''
import sys
import turtle


def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    # Midpoint of the segment between the two vertices.
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    # Draw the current triangle, then recurse into the three corner triangles.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
644
1
'''simple docstring'''
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch('''socket.socket''')
@patch('''builtins.open''')
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename='''mytext.txt''', testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
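For readers unfamiliar with the patching pattern in the test above, a minimal sketch of how unittest.mock.patch substitutes a module-level dependency; this is a toy example with hypothetical helper names, unrelated to file_transfer:

import socket
from unittest.mock import Mock, patch


def open_and_close() -> None:
    sock = socket.socket()
    sock.close()


@patch("socket.socket")
def check_open_and_close(sock_cls: Mock) -> None:
    # While patched, socket.socket is replaced by a Mock class; the object it
    # returns records every method call for later assertions.
    open_and_close()
    sock_cls.assert_called_once()
    sock_cls.return_value.close.assert_called_once()


check_open_and_close()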
644
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : List[str] = False class __a (unittest.TestCase ): pass @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = generator.manual_seed(0 ) UpperCAmelCase_ : Dict = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077''' UpperCAmelCase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe.dual_guided( prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.text_to_image( prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 
0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
644
1
'''simple docstring'''
import sys
import turtle


def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    # Midpoint of the segment between the two vertices.
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    # Draw the current triangle, then recurse into the three corner triangles.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
644
'''simple docstring''' snake_case_ : int = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
644
1
'''simple docstring'''
import math


def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F"""0o{int(octal)}"""


def main() -> None:
    print('''\n2 in octal is:''')
    print(decimal_to_octal(2))  # = 2
    print('''\n8 in octal is:''')
    print(decimal_to_octal(8))  # = 10
    print('''\n65 in octal is:''')
    print(decimal_to_octal(65))  # = 101
    print('''\n216 in octal is:''')
    print(decimal_to_octal(216))  # = 330
    print('''\n512 in octal is:''')
    print(decimal_to_octal(512))  # = 1000
    print('''\n''')


if __name__ == "__main__":
    main()
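The digit-accumulation trick above builds the octal digits inside a decimal integer by scaling each remainder with a power of ten. A shorter equivalent sketched with divmod, checked against Python's built-in oct(); the `to_octal` name is hypothetical:

def to_octal(num: int) -> str:
    if num == 0:
        return "0o0"
    digits = ""
    while num > 0:
        num, remainder = divmod(num, 8)
        digits = str(remainder) + digits
    return "0o" + digits


for n in (2, 8, 65, 216, 512):
    assert to_octal(n) == oct(n), (to_octal(n), oct(n))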
644
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet UpperCAmelCase_ : Dict = KarrasVeScheduler() UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256''' UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = KarrasVeScheduler() UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
644
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __a : __a : Optional[int] = XGLMConfig __a : Dict = {} __a : Dict = "gelu" def __init__( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Dict=14 , __magic_name__ : str=7 , __magic_name__ : Tuple=True , __magic_name__ : int=True , __magic_name__ : int=True , __magic_name__ : Optional[int]=99 , __magic_name__ : Dict=32 , __magic_name__ : List[Any]=2 , __magic_name__ : Dict=4 , __magic_name__ : Optional[int]=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Union[str, Any]=5_12 , __magic_name__ : Dict=0.0_2 , ) -> int: """simple docstring""" UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Dict = batch_size UpperCAmelCase_ : Any = seq_length UpperCAmelCase_ : List[str] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : Optional[int] = use_labels UpperCAmelCase_ : List[Any] = vocab_size UpperCAmelCase_ : Dict = d_model UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : Tuple = ffn_dim UpperCAmelCase_ : Optional[int] = activation_function UpperCAmelCase_ : str = activation_dropout UpperCAmelCase_ : Union[str, Any] = attention_dropout UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : Optional[int] = None UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : int = 2 UpperCAmelCase_ : List[Any] = 1 def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Dict = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : int = self.get_config() UpperCAmelCase_ : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__magic_name__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__magic_name__ , ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = 
self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Dict = config_and_inputs UpperCAmelCase_ : str = { '''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Dict = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __a : Optional[int] = (TFXGLMForCausalLM,) if is_tf_available() else () __a : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __a : Any = False __a : Dict = False __a : Union[str, Any] = False def UpperCAmelCase__ ( self : int ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : int = TFXGLMModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , n_embd=37 ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @slow def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = TFXGLMModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" super().test_resize_token_embeddings() @require_tf class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Any]=True ) -> Dict: """simple docstring""" UpperCAmelCase_ : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) UpperCAmelCase_ : Optional[Any] = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off UpperCAmelCase_ : Any = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on UpperCAmelCase_ : Union[str, Any] = model.generate(__magic_name__ , do_sample=__magic_name__ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __magic_name__ ) @slow def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : List[str] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) UpperCAmelCase_ : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) UpperCAmelCase_ : Optional[int] = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' ) UpperCAmelCase_ : str = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): UpperCAmelCase_ : List[str] = model.generate(__magic_name__ , do_sample=__magic_name__ , seed=[7, 0] ) UpperCAmelCase_ : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Dict = ( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(__magic_name__ , __magic_name__ ) @slow def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) UpperCAmelCase_ : Union[str, Any] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) UpperCAmelCase_ : Any = '''left''' # use different length sentences to test batching UpperCAmelCase_ : int = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] UpperCAmelCase_ : Tuple = tokenizer(__magic_name__ , return_tensors='''tf''' , padding=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = inputs['''input_ids'''] UpperCAmelCase_ : List[str] = model.generate(input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 ) UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids UpperCAmelCase_ : Any = model.generate(input_ids=__magic_name__ , max_new_tokens=12 ) UpperCAmelCase_ : Optional[int] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids UpperCAmelCase_ : Dict = model.generate(input_ids=__magic_name__ , max_new_tokens=12 ) UpperCAmelCase_ : Any = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Tuple = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
644
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class __a (lowerCamelCase ): __a : List[Any] = "openai/whisper-base" __a : Optional[Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __a : Any = "transcriber" __a : str = WhisperProcessor __a : List[Any] = WhisperForConditionalGeneration __a : int = ["audio"] __a : Optional[Any] = ["text"] def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]: """simple docstring""" return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple: """simple docstring""" return self.model.generate(inputs=__magic_name__ ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str: """simple docstring""" return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
644
1
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser snake_case_ : Tuple = logging.getLogger(__name__) torch.set_grad_enabled(False) snake_case_ : List[str] = "cuda" if torch.cuda.is_available() else "cpu" def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : List[str]=100, SCREAMING_SNAKE_CASE__ : Union[str, Any]=" " ) -> List[str]: UpperCAmelCase_ : Optional[Any] = text.split(SCREAMING_SNAKE_CASE__ ) return [character.join(text[i : i + n] ).strip() for i in range(0, len(SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__ )] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : dict ) -> dict: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = [], [] for title, text in zip(documents['''title'''], documents['''text'''] ): if text is not None: for passage in split_text(SCREAMING_SNAKE_CASE__ ): titles.append(title if title is not None else '''''' ) texts.append(SCREAMING_SNAKE_CASE__ ) return {"title": titles, "text": texts} def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : dict, SCREAMING_SNAKE_CASE__ : DPRContextEncoder, SCREAMING_SNAKE_CASE__ : DPRContextEncoderTokenizerFast ) -> dict: UpperCAmelCase_ : int = ctx_tokenizer( documents['''title'''], documents['''text'''], truncation=SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )['''input_ids'''] UpperCAmelCase_ : int = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE__ ), return_dict=SCREAMING_SNAKE_CASE__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : "RagExampleArguments", SCREAMING_SNAKE_CASE__ : "ProcessingArguments", SCREAMING_SNAKE_CASE__ : "IndexHnswArguments", ) -> Optional[int]: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way UpperCAmelCase_ : int = load_dataset( '''csv''', data_files=[rag_example_args.csv_path], split='''train''', delimiter='''\t''', column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words UpperCAmelCase_ : List[str] = dataset.map(SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, num_proc=processing_args.num_proc ) # And compute the embeddings UpperCAmelCase_ : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) UpperCAmelCase_ : List[str] = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, 
save as float32 instead of float64 to save space UpperCAmelCase_ : Optional[int] = dataset.map( partial(SCREAMING_SNAKE_CASE__, ctx_encoder=SCREAMING_SNAKE_CASE__, ctx_tokenizer=SCREAMING_SNAKE_CASE__ ), batched=SCREAMING_SNAKE_CASE__, batch_size=processing_args.batch_size, features=SCREAMING_SNAKE_CASE__, ) # And finally save your dataset UpperCAmelCase_ : Tuple = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset''' ) dataset.save_to_disk(SCREAMING_SNAKE_CASE__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search UpperCAmelCase_ : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''', custom_index=SCREAMING_SNAKE_CASE__ ) # And save the index UpperCAmelCase_ : str = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(SCREAMING_SNAKE_CASE__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __a : __a : str = field( default=str(Path(lowerCamelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) __a : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) __a : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) __a : Optional[str] = field( default=str(Path(lowerCamelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class __a : __a : Optional[int] = field( default=lowerCamelCase , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) __a : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class __a : __a : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) __a : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) snake_case_ : int = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) snake_case_ ,snake_case_ ,snake_case_ : Optional[int] = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: snake_case_ : Optional[Any] = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
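The indexing step above goes through dataset.add_faiss_index, but the underlying Faiss call pattern is easy to try in isolation. A minimal sketch with made-up data, assuming faiss and numpy are installed; 768 and 128 mirror the script's default dimension and HNSW link count:

import faiss
import numpy as np

d, m = 768, 128  # embedding dimension and HNSW links per node, as in the script
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)

embeddings = np.random.rand(1000, d).astype("float32")  # stand-in for DPR outputs
index.add(embeddings)

query = np.random.rand(1, d).astype("float32")
scores, ids = index.search(query, 5)  # top-5 approximate inner-product matches
print(ids[0])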
644
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    # Recursive Euclidean algorithm.
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input('''Enter two integers separated by comma (,): ''').split(''',''')
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            F"""greatest_common_divisor({num_a}, {num_b}) = """
            F"""{greatest_common_divisor(num_a, num_b)}"""
        )
        print(F"""By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}""")
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''')


if __name__ == "__main__":
    main()
644
1
'''simple docstring'''
def binary_multiply(a: int, b: int) -> int:
    # Shift-and-add multiplication: add a doubled copy of `a` for each set bit of `b`.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    # Same loop, reducing modulo `c` after every addition to bound intermediates.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
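A quick randomized check of the two helpers above against plain `*` and `%`; this is a standalone sketch and assumes both functions are in scope:

import random

random.seed(0)
for _ in range(100):
    a = random.randrange(10**9)
    b = random.randrange(10**9)
    c = random.randrange(1, 10**9)
    assert binary_multiply(a, b) == a * b
    assert binary_mod_multiply(a, b, c) == (a * b) % c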
644
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str: """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : List[str] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : List[str] = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : List[str] = scope UpperCAmelCase_ : List[str] = range_bbox def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : List[str] = bbox[i, j, 3] UpperCAmelCase_ : Dict = bbox[i, j, 1] UpperCAmelCase_ : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[str] = bbox[i, j, 2] UpperCAmelCase_ : Tuple = bbox[i, j, 0] UpperCAmelCase_ : Union[str, Any] = t UpperCAmelCase_ : int = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) 
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, 
'''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Tuple = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __a : Any = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __a : Union[str, Any] = False __a : int = False def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str: """simple docstring""" return True def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = LiltModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : Tuple = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_torch @slow class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ ) UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ ) UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ ) UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] ) UpperCAmelCase_ : List[str] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , ) self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
644
1
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> List[Any]: # picklable for multiprocessing return x.sum() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: # picklable for multiprocessing return i + 1 @dataclass class __a : __a : int __a : str class __a (lowerCamelCase ): def UpperCAmelCase__ ( self : Tuple ) -> int: """simple docstring""" UpperCAmelCase_ : List[str] = {} UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : str = 1 UpperCAmelCase_ : int = [1, 2] UpperCAmelCase_ : str = {'''a''': 1, '''b''': 2} UpperCAmelCase_ : str = {'''a''': [1, 2], '''b''': [3, 4]} UpperCAmelCase_ : str = {'''a''': {'''1''': 1}, '''b''': 2} UpperCAmelCase_ : List[str] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} UpperCAmelCase_ : Dict = {} UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Optional[Any] = [2, 3] UpperCAmelCase_ : List[str] = {'''a''': 2, '''b''': 3} UpperCAmelCase_ : int = {'''a''': [2, 3], '''b''': [4, 5]} UpperCAmelCase_ : List[str] = {'''a''': {'''1''': 2}, '''b''': 3} UpperCAmelCase_ : Optional[int] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ ) UpperCAmelCase_ : Dict = 2 self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) UpperCAmelCase_ : Optional[int] = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )} UpperCAmelCase_ : str = {'''a''': 2, '''b''': 0, '''c''': 2} UpperCAmelCase_ : int = { '''a''': np.eye(2 ).astype(__magic_name__ ), '''b''': np.zeros(3 ).astype(__magic_name__ ), '''c''': np.ones(2 ).astype(__magic_name__ ), } self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ) , __magic_name__ ) self.assertEqual( {k: v.tolist() for k, 
v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ) , __magic_name__ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(__magic_name__ ): # can't pickle a local lambda map_nested(lambda __magic_name__ : x + 1 , __magic_name__ , num_proc=__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {'''a''': 1, '''b''': 2} UpperCAmelCase_ : Optional[int] = {'''a''': 3, '''b''': 4} UpperCAmelCase_ : Optional[int] = {'''a''': 5, '''b''': 6} UpperCAmelCase_ : Optional[int] = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(__magic_name__ , __magic_name__ , __magic_name__ ) ) , __magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" class __a : __a : List[str] = "bar" UpperCAmelCase_ : Union[str, Any] = Foo() self.assertEqual(foo.my_attr , '''bar''' ) with temporary_assignment(__magic_name__ , '''my_attr''' , '''BAR''' ): self.assertEqual(foo.my_attr , '''BAR''' ) self.assertEqual(foo.my_attr , '''bar''' ) @pytest.mark.parametrize( '''iterable_length, num_proc, expected_num_proc''', [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch( '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool: UpperCAmelCase_ : List[str] = {F"""{i}""": i for i in range(SCREAMING_SNAKE_CASE__ )} UpperCAmelCase_ : int = map_nested(lambda SCREAMING_SNAKE_CASE__ : x + 10, SCREAMING_SNAKE_CASE__, num_proc=SCREAMING_SNAKE_CASE__, parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class __a (lowerCamelCase ): @require_tf def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" import tensorflow as tf from tensorflow.keras import layers UpperCAmelCase_ : int = layers.Dense(2 ) def gen_random_output(): UpperCAmelCase_ : int = tf.random.uniform((1, 3) ) return model(__magic_name__ ).numpy() with temp_seed(42 , set_tensorflow=__magic_name__ ): UpperCAmelCase_ : int = gen_random_output() with temp_seed(42 , set_tensorflow=__magic_name__ ): UpperCAmelCase_ : List[Any] = gen_random_output() UpperCAmelCase_ : Optional[int] = gen_random_output() np.testing.assert_equal(__magic_name__ , __magic_name__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def UpperCAmelCase__ ( self : Optional[int] ) -> str: """simple docstring""" import torch def gen_random_output(): UpperCAmelCase_ : Optional[Any] = torch.nn.Linear(3 , 2 ) UpperCAmelCase_ : Tuple = torch.rand(1 , 3 ) return model(__magic_name__ ).detach().numpy() with temp_seed(42 , set_pytorch=__magic_name__ ): UpperCAmelCase_ : Union[str, Any] = 
gen_random_output() with temp_seed(42 , set_pytorch=__magic_name__ ): UpperCAmelCase_ : List[Any] = gen_random_output() UpperCAmelCase_ : List[Any] = gen_random_output() np.testing.assert_equal(__magic_name__ , __magic_name__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): UpperCAmelCase_ : List[str] = gen_random_output() with temp_seed(42 ): UpperCAmelCase_ : List[str] = gen_random_output() UpperCAmelCase_ : str = gen_random_output() np.testing.assert_equal(__magic_name__ , __magic_name__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('''input_data''', [{}] ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> str: UpperCAmelCase_ : List[str] = NestedDataStructure(SCREAMING_SNAKE_CASE__ ).data assert output_data == input_data @pytest.mark.parametrize( '''data, expected_output''', [ ({}, []), ([], []), ('''foo''', ['''foo''']), (['''foo''', '''bar'''], ['''foo''', '''bar''']), ([['''foo''', '''bar''']], ['''foo''', '''bar''']), ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']), ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']), ({'''a''': 1, '''b''': 2}, [1, 2]), ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]), ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]), ], ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]: UpperCAmelCase_ : List[str] = NestedDataStructure(SCREAMING_SNAKE_CASE__ ).flatten() assert output == expected_output def lowerCamelCase_ ( ) -> Dict: UpperCAmelCase_ : Union[str, Any] = A(x=1, y='''foobar''' ) UpperCAmelCase_ : int = {'''x''': 1, '''y''': '''foobar'''} assert asdict(SCREAMING_SNAKE_CASE__ ) == expected_output UpperCAmelCase_ : Tuple = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]} UpperCAmelCase_ : List[Any] = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]} assert asdict(SCREAMING_SNAKE_CASE__ ) == expected_output with pytest.raises(SCREAMING_SNAKE_CASE__ ): asdict([1, A(x=10, y='''foo''' )] ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: return text.split() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def lowerCamelCase_ ( ) -> Optional[int]: with Pool(2 ) as pool: UpperCAmelCase_ : List[Any] = list(iflatmap_unordered(SCREAMING_SNAKE_CASE__, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(SCREAMING_SNAKE_CASE__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: UpperCAmelCase_ : Optional[int] = list(iflatmap_unordered(SCREAMING_SNAKE_CASE__, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert 
len(out ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: out = [] for yield_time, content in iflatmap_unordered( pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ): assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded" out.append(content ) assert out.count('''a''' ) == 2 assert out.count('''b''' ) == 2 assert len(out ) == 4
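# A minimal sketch of the behavior exercised above, assuming the same
# `datasets` internals these tests import: `map_nested` applies a function over
# arbitrarily nested containers, and only spawns a multiprocessing pool when
# `num_proc > 1` and the iterable reaches `parallel_min_length`.
from datasets.utils.py_utils import map_nested

def add_one(x):
    # top-level (picklable) function; as the test above shows, a local lambda
    # cannot be pickled once num_proc > 1
    return x + 1

assert map_nested(add_one, {"a": 1, "b": [2, 3]}) == {"a": 2, "b": [3, 4]}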
644
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : int = "▁" snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} snake_case_ : int = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } snake_case_ : Optional[Any] = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } snake_case_ : Dict = { "ernie-m-base": 5_14, "ernie-m-large": 5_14, } snake_case_ : Any = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class __a (lowerCamelCase ): __a : List[str] = ["input_ids"] __a : Union[str, Any] = VOCAB_FILES_NAMES __a : Tuple = PRETRAINED_INIT_CONFIGURATION __a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __a : Union[str, Any] = RESOURCE_FILES_NAMES def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) UpperCAmelCase_ : Optional[Any] = do_lower_case UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ ) else: UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )} UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()} def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any: """simple docstring""" if text is None: return None UpperCAmelCase_ : str = self.tokenize(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', [] for i, ch in enumerate(__magic_name__ ): if ch in self.SP_CHAR_MAPPING: UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ ) else: UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ ) if self.is_whitespace(__magic_name__ ): continue normalized_text += ch char_mapping.extend([i] * len(__magic_name__ ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0 if self.do_lower_case: UpperCAmelCase_ : Optional[int] = text.lower() for token in split_tokens: if token[:1] == "▁": UpperCAmelCase_ : Tuple = token[1:] UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) UpperCAmelCase_ : int = end return token_mapping @property def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" return len(self.vocab ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.__dict__.copy() UpperCAmelCase_ : Optional[Any] = None return state def __setstate__( self : str , __magic_name__ : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]: """simple docstring""" if self.sp_model_kwargs.get('''enable_sampling''' ) is True: UpperCAmelCase_ : Dict = True if self.sp_model_kwargs.get('''alpha''' ) is not None: UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' 
) if not enable_sampling: UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ ) else: UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : List[Any] = [] for pi, piece in enumerate(__magic_name__ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0: new_pieces.append(__magic_name__ ) continue else: continue UpperCAmelCase_ : List[str] = 0 for i, chunk in enumerate(__magic_name__ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__magic_name__ ) UpperCAmelCase_ : List[Any] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : List[str] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : str = i if len(__magic_name__ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]: """simple docstring""" return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.reverse_vocab.get(__magic_name__ , self.unk_token ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id] UpperCAmelCase_ : List[Any] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ 
: Optional[List[int]] = None ) -> List[int]: """simple docstring""" # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(__magic_name__ ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3) def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple: """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str: """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__magic_name__ ) == 1: UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ ) if cat == "Zs": return True return False def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(__magic_name__ ): UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' ) UpperCAmelCase_ : Dict = int(__magic_name__ ) return token_to_idx def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 0 if os.path.isdir(__magic_name__ ): UpperCAmelCase_ : Any = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) UpperCAmelCase_ : Dict = token_index writer.write(token + '''\n''' ) index += 1 UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' ) with open(__magic_name__ , '''wb''' ) as fi: UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (vocab_file,)
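# Usage sketch for the tokenizer above (named ErnieMTokenizer in upstream
# transformers; class and method names are obfuscated here). Assumes one of the
# checkpoints referenced in the pretrained maps at the top of the file is
# available:
#     tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#     enc = tokenizer("Hello world!")           # returns only "input_ids",
#                                               # per model_input_names above
#     text = tokenizer.convert_ids_to_string(enc["input_ids"])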
644
1
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
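# Usage sketch (values hypothetical): `depths` drives `stage_names`, and
# `out_features` must name stages from that list.
#     config = ConvNextV2Config(depths=[2, 2, 6, 2], out_features=["stage4"])
#     config.stage_names  # ["stem", "stage1", "stage2", "stage3", "stage4"]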
644
'''simple docstring'''


def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
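# Worked example of the padding logic above (hand-checkable): -5 has 3
# magnitude bits ("101"), abs(-5) - (1 << 3) == -3, bin(-3)[3:] == "11",
# padded to "1" + "0" + "11" == "1011". The checks run only as a script.
if __name__ == "__main__":
    assert twos_complement(-5) == "0b1011"
    assert twos_complement(-1) == "0b11"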
644
1
'''simple docstring'''

import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
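# Worked example: with the initial parameter_vector [2, 4, 1, 5], the
# hypothesis for the first training input (5, 2, 3) is
# 2 + 4*5 + 1*2 + 5*3 == 39, so the initial error against its target 15 is
# 39 - 15 == 24; each iteration of run_gradient_descent shrinks exactly this
# kind of residual.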
644
'''simple docstring'''

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
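# Usage sketch outside the test harness (model, tokenizer and input_ids are
# placeholders): a DisjunctiveConstraint is passed to constrained beam search,
# which forces the output to contain at least one of the candidate sequences.
#     flexible_phrases = tokenizer(["rains", "pours"], add_special_tokens=False).input_ids
#     constraint = DisjunctiveConstraint(flexible_phrases)
#     model.generate(input_ids, constraints=[constraint], num_beams=4)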
644
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet UpperCAmelCase_ : Dict = KarrasVeScheduler() UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256''' UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = KarrasVeScheduler() UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
644
'''simple docstring'''

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential


if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
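    # Follow-up sketch (assumes the objects defined above): `pred` lives in the
    # scaler's [0, 1] space. Refitting an identically parameterized scaler on
    # the raw target column (same data, same default range) lets us map
    # predictions back to the original units:
    rescaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(len_data, 1))
    unscaled_pred = rescaler.inverse_transform(pred.reshape(-1, 1)).reshape(pred.shape)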
644
1
'''simple docstring''' from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class __a (nn.Module ): def __init__( self : Any , __magic_name__ : int = 16 , __magic_name__ : int = 88 , __magic_name__ : Optional[int] = None , __magic_name__ : int = 1 , __magic_name__ : float = 0.0 , __magic_name__ : int = 32 , __magic_name__ : Optional[int] = None , __magic_name__ : bool = False , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : str = "geglu" , __magic_name__ : Optional[int] = None , ) -> List[Any]: """simple docstring""" super().__init__() UpperCAmelCase_ : List[Any] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference UpperCAmelCase_ : List[str] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` UpperCAmelCase_ : Optional[int] = [77, 2_57] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` UpperCAmelCase_ : Tuple = [1, 0] def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]=None , __magic_name__ : List[str]=None , __magic_name__ : bool = True , ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = hidden_states UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Any = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens UpperCAmelCase_ : Optional[Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] UpperCAmelCase_ : Any = self.transformer_index_for_condition[i] UpperCAmelCase_ : str = self.transformers[transformer_index]( __magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] UpperCAmelCase_ : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) UpperCAmelCase_ : Any = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__magic_name__ )
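# Mixing sketch for the forward pass above: `encoder_hidden_states` is split
# column-wise into a 77-token and a 257-token block (condition_lengths), each
# routed to one inner transformer per transformer_index_for_condition; with
# per-branch residuals r_0 and r_1, the output is
#     mix_ratio * r_0 + (1 - mix_ratio) * r_1 + hidden_states.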
644
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1" snake_case_ : Dict = "CompVis/stable-diffusion-v1-2" snake_case_ : Any = "CompVis/stable-diffusion-v1-3" snake_case_ : str = "CompVis/stable-diffusion-v1-4" class __a (lowerCamelCase ): def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str: """simple docstring""" super().__init__() UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Tuple = StableDiffusionPipeline( vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )} def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" self.enable_attention_slicing(__magic_name__ ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ ,
callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[Any] , 
__magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(__magic_name__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase_ : int = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
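# Usage sketch (hypothetical wiring; class names are obfuscated above, this is
# the "stable_diffusion_comparison" community pipeline in upstream diffusers):
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     out = pipe(prompt="an astronaut riding a horse", num_inference_steps=50)
#     # out.images holds one sample per checkpoint v1.1 through v1.4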
644
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
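# Behavior sketch: with the _LazyModule wiring above, importing the package is
# cheap; a submodule is imported only on first attribute access, e.g.
#     from transformers.models.opt import OPTConfig  # loads configuration_opt only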
644
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler snake_case_ : Optional[int] = 16 snake_case_ : Tuple = 32 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict: UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ : Tuple = datasets.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE__ : str ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' ) # Instantiate dataloaders. UpperCAmelCase_ : str = DataLoader( tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = DataLoader( tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any: model.eval() UpperCAmelCase_ : List[str] = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : List[str] = metric.compute() return eval_metric["accuracy"] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple: # Initialize accelerator UpperCAmelCase_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ : int = config['''lr'''] UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] ) UpperCAmelCase_ : Optional[int] = int(config['''seed'''] ) UpperCAmelCase_ : List[str] = int(config['''batch_size'''] ) UpperCAmelCase_ : Optional[int] = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ ) # Instantiate optimizer UpperCAmelCase_ : str = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, ) else: UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' ) UpperCAmelCase_ : Optional[Any] = num_epochs if args.partial_train_epoch is not None: UpperCAmelCase_ : List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1] UpperCAmelCase_ : int = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1 UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f: UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCAmelCase_ : int = {} for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = outputs.loss UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCAmelCase_ : Tuple = F"""epoch_{epoch}""" UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ ) accelerator.save_state(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = accuracy UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0] UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr'''] UpperCAmelCase_ : Tuple = epoch UpperCAmelCase_ : Dict = overall_step accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> List[str]: UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking 
peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, ) parser.add_argument( '''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', ) parser.add_argument( '''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', ) parser.add_argument( '''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', ) parser.add_argument( '''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', ) UpperCAmelCase_ : Optional[int] = parser.parse_args() UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
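# Launch sketch: the flags below are this script's own argparse options; the
# DeepSpeed config path and script filename are placeholders.
#     accelerate launch --config_file deepspeed_config.yaml checkpointing_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./ckpts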
644
1
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __a : @staticmethod def UpperCAmelCase__ ( *__magic_name__ : Optional[Any] , **__magic_name__ : Tuple ) -> int: """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class __a (unittest.TestCase ): __a : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Dict = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) UpperCAmelCase_ : int = [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] return object_detector, examples def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : str = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase_ : int = len(__magic_name__ ) self.assertGreater(__magic_name__ , 0 ) self.assertEqual( __magic_name__ , [ { '''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ ), '''box''': {'''xmin''': ANY(__magic_name__ ), '''ymin''': ANY(__magic_name__ ), '''xmax''': ANY(__magic_name__ ), '''ymax''': ANY(__magic_name__ )}, } for i in range(__magic_name__ ) ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" pass @require_torch def UpperCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Tuple = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) UpperCAmelCase_ : List[Any] = object_detector( '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', 
'''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, ] , ) UpperCAmelCase_ : int = object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, ] ] , ) @require_torch @slow def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = pipeline('''zero-shot-object-detection''' ) UpperCAmelCase_ : Any = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ] , ) UpperCAmelCase_ : Any = object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ] , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, 
'''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ], [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ], ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" pass @require_torch @slow def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = 0.2 UpperCAmelCase_ : Union[str, Any] = pipeline('''zero-shot-object-detection''' ) UpperCAmelCase_ : Optional[int] = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__magic_name__ , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, ] , ) @require_torch @slow def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = 2 UpperCAmelCase_ : Any = pipeline('''zero-shot-object-detection''' ) UpperCAmelCase_ : List[Any] = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__magic_name__ , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, ] , )
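For orientation, a minimal standalone sketch of the pipeline these tests exercise; the image URL and candidate labels come from the tests themselves, and the threshold value is illustrative:

from transformers import pipeline

# Zero-shot object detection scores free-form text labels against one image.
detector = pipeline("zero-shot-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,  # keep only reasonably confident boxes
)
for pred in predictions:
    print(pred["label"], round(pred["score"], 4), pred["box"])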
644
"""Return all permutations of a list, via recursion and via backtracking."""


def permute(nums: list[int]) -> list[list[int]]:
    """Recursive approach: rotate the first element out, permute the rest."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """In-place backtracking approach: swap, recurse, swap back."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
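A quick usage sketch of the two helpers above; both names are defined in this snippet, and the printed orderings depend on the traversal order:

# Each call returns all 3! = 6 permutations as lists.
print(permute([1, 2, 3]))
print(permute2([1, 2, 3]))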
644
1
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : List[str] = False class __a (unittest.TestCase ): pass @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = generator.manual_seed(0 ) UpperCAmelCase_ : Dict = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077''' UpperCAmelCase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe.dual_guided( prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.text_to_image( prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 
0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
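A compact sketch of the dual-guided call exercised above; the checkpoint, image URL, and strength/guidance values all appear in the tests, while the device placement assumes a CUDA GPU:

import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils import load_image

pipe = VersatileDiffusionPipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
)
out = pipe.dual_guided(
    prompt="cyberpunk 2077",
    image=image,
    text_to_image_strength=0.75,  # blend between text and image guidance
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    num_inference_steps=50,
    output_type="numpy",
).images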
644
"""Fenwick-tree-style structure supporting point updates and range maximum queries."""


class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers exactly this index, so it can be overwritten.
                self.tree[index] = value
            else:
                # Merge the new value into the node's running maximum.
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
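A short usage sketch of the structure above; queries take a half-open range, matching the `right` decrement in query:

ft = MaxFenwickTree(5)
ft.update(4, 100)
ft.update(2, 20)
print(ft.query(0, 5))  # 100 -> maximum over indices [0, 5)
print(ft.query(0, 3))  # 20  -> index 4 falls outside [0, 3)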
644
1
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black snake_case_ : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. snake_case_ : str = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n" class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" UpperCAmelCase_ : Any = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) ) UpperCAmelCase_ : Union[str, Any] = self.transformer_dir shutil.copy( os.path.join(__magic_name__ , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , ) def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" UpperCAmelCase_ : Tuple = '''src/transformers''' shutil.rmtree(self.transformer_dir ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Union[str, Any]=None ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: UpperCAmelCase_ : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result UpperCAmelCase_ : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) UpperCAmelCase_ : Tuple = black.format_str(__magic_name__ , mode=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = os.path.join(self.transformer_dir , '''new_code.py''' ) with open(__magic_name__ , '''w''' , newline='''\n''' ) as f: f.write(__magic_name__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__magic_name__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__magic_name__ ) with open(__magic_name__ , '''r''' ) as f: self.assertTrue(f.read() , __magic_name__ ) def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' ) self.assertEqual(__magic_name__ , __magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" # Base copy consistency self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __magic_name__ , ) # Copy consistency with rename 
self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , __magic_name__ ) , ) # Copy consistency with a really long name UpperCAmelCase_ : Tuple = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub('''Bert''' , __magic_name__ , __magic_name__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __magic_name__ , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __magic_name__ ) , ) def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] UpperCAmelCase_ : Dict = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) UpperCAmelCase_ : List[Any] = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) UpperCAmelCase_ : Any = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = check_copies.convert_to_localized_md( __magic_name__ , __magic_name__ , localized_readme['''format_model_list'''] ) self.assertFalse(__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = check_copies.convert_to_localized_md( __magic_name__ , __magic_name__ , localized_readme['''format_model_list'''] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) UpperCAmelCase_ : Union[str, Any] = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) UpperCAmelCase_ : Optional[int] = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) UpperCAmelCase_ , UpperCAmelCase_ : Any = check_copies.convert_to_localized_md( __magic_name__ , __magic_name__ , localized_readme['''format_model_list'''] ) # Check if the model link is synchronized. self.assertEqual(__magic_name__ , __magic_name__ )
644
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str: """simple docstring""" UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Any = use_input_mask UpperCAmelCase_ : List[str] = use_token_type_ids UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : Tuple = scope def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None if self.use_token_type_ids: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: 
"""simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # create attention mask UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) UpperCAmelCase_ : Any = self.seq_length // 2 UpperCAmelCase_ : Tuple = 0 # first forward pass UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1 UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCAmelCase_ : str = random_other_next_tokens # append to next input_ids and attn_mask UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : int = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , ) # get two different outputs UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] # select random slice UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , 
output_from_past.shape[-1] ).item() UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach() UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval() UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) # first forward pass UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[ '''last_hidden_state''' ] # select random slice UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ ) model.to(__magic_name__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str: """simple docstring""" UpperCAmelCase_ : int = BioGptModel(__magic_name__ ) UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 ) def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]: 
"""simple docstring""" UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : str = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else () __a : Union[str, Any] = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) __a : List[str] = False def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = BioGptModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : str = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ ) @slow def UpperCAmelCase__ ( 
self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : Tuple = '''left''' # Define PAD Token = EOS Token = 50256 UpperCAmelCase_ : List[Any] = tokenizer.eos_token UpperCAmelCase_ : List[Any] = model.config.eos_token_id # use different length sentences to test batching UpperCAmelCase_ : Tuple = [ '''Hello, my dog is a little''', '''Today, I''', ] UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ ) UpperCAmelCase_ : Any = model.generate( input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , ) UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ ) UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings ) UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : Tuple = input_dict['''input_ids'''] UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = 3 UpperCAmelCase_ : Optional[int] = '''multi_label_classification''' UpperCAmelCase_ : int = input_dict['''input_ids'''] UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCAmelCase_ : str = model(__magic_name__ )[0] UpperCAmelCase_ : Optional[int] = 4_23_84 UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __magic_name__ ) UpperCAmelCase_ : List[Any] = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ ) UpperCAmelCase_ : Optional[int] = model.generate( **__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , ) UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(__magic_name__ , __magic_name__ )
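A minimal generation sketch matching the integration tests above; the checkpoint is the public microsoft/biogpt model, and the length/beam settings are illustrative:

import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        **inputs, max_length=50, num_beams=5, early_stopping=True
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))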
644
1
"""Convert a (possibly negative) hexadecimal string to its binary representation."""


def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    if not bin_str:  # the input was zero, so the loop never ran
        bin_str = "0"

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
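Example calls for the converter above; the decimal digits of the return value spell out the binary representation:

print(hex_to_bin("AC"))   # 10101100 (0xAC == 172)
print(hex_to_bin("-1a"))  # -11010   (0x1A == 26)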
644
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __a (lowerCamelCase , unittest.TestCase ): __a : List[str] = BlenderbotSmallTokenizer __a : List[Any] = False def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" super().setUp() UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = '''adapt act apte''' UpperCAmelCase_ : Tuple = '''adapt act apte''' return input_text, output_text def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ : List[Any] = '''adapt act apte''' UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te'''] UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] UpperCAmelCase_ : Optional[int] = '''I am a small frog.''' UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) UpperCAmelCase_ : List[Any] = '''I am a small frog .''' UpperCAmelCase_ : Any = '''.''' UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
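A minimal round-trip sketch using the same public checkpoint as the tests above; note the asserted behavior that decoding lowercases and re-spaces the text:

from transformers import BlenderbotSmallTokenizer

tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
ids = tok("I am a small frog.").input_ids
print(tok.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
# -> "i am a small frog ."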
644
1
'''simple docstring''' import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP snake_case_ : Any = False try: snake_case_ : Optional[int] = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class __a : def __init__( self : str , __magic_name__ : str = None , __magic_name__ : list = [] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[str] = choices UpperCAmelCase_ : Any = prompt if sys.platform == "win32": UpperCAmelCase_ : List[Any] = '''*''' else: UpperCAmelCase_ : List[Any] = '''➔ ''' def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : str = "" ) -> Tuple: """simple docstring""" if sys.platform != "win32": writeColor(self.choices[index] , 32 , __magic_name__ ) else: forceWrite(self.choices[index] , __magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> str: """simple docstring""" if index == self.position: forceWrite(F""" {self.arrow_char} """ ) self.write_choice(__magic_name__ ) else: forceWrite(F""" {self.choices[index]}""" ) reset_cursor() def UpperCAmelCase__ ( self : str , __magic_name__ : Direction , __magic_name__ : int = 1 ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(__magic_name__ ) move_cursor(__magic_name__ , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP['''up'''] ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" self.move_direction(Direction.UP ) @input.mark(KEYMAP['''down'''] ) def UpperCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" self.move_direction(Direction.DOWN ) @input.mark(KEYMAP['''newline'''] ) def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" move_cursor(len(self.choices ) - self.position , '''DOWN''' ) return self.position @input.mark(KEYMAP['''interrupt'''] ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" move_cursor(len(self.choices ) - self.position , '''DOWN''' ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(__magic_name__ )] for number in range(10 )] ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = int(chr(self.current_selection ) ) UpperCAmelCase_ : List[str] = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , __magic_name__ ) else: return else: return def UpperCAmelCase__ ( self : Any , __magic_name__ : int = 0 ) -> Union[str, Any]: """simple docstring""" if self.prompt: linebreak() forceWrite(self.prompt , '''\n''' ) if in_colab: forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' ) else: forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' ) UpperCAmelCase_ : Tuple = default_choice for i in range(len(self.choices ) ): self.print_choice(__magic_name__ ) forceWrite('''\n''' ) move_cursor(len(self.choices ) - self.position , '''UP''' ) with cursor.hide(): while 
True: if in_colab: try: UpperCAmelCase_ : int = int(builtins.input() ) except ValueError: UpperCAmelCase_ : Tuple = default_choice else: UpperCAmelCase_ : Dict = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , '''UP''' ) clear_line() self.write_choice(__magic_name__ , '''\n''' ) return choice
644
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = get_activation('''swish''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[int] = get_activation('''mish''' ) self.assertIsInstance(__magic_name__ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = get_activation('''gelu''' ) self.assertIsInstance(__magic_name__ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
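The helper under test maps a string name to a torch activation module; a short sketch using only names the tests themselves import:

import torch
from diffusers.models.activations import get_activation

act = get_activation("gelu")  # returns an nn.GELU instance, per the test above
x = torch.tensor([-100.0, 0.0, 20.0])
print(act(x))  # large negatives collapse to ~0, large positives pass through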
644
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case_ : int = logging.get_logger(__name__) snake_case_ : str = { "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json", # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __a (lowerCamelCase ): __a : Optional[Any] = "deformable_detr" __a : Optional[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : List[Any] , __magic_name__ : str=True , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : int=3_00 , __magic_name__ : str=10_24 , __magic_name__ : List[Any]=6 , __magic_name__ : Dict=10_24 , __magic_name__ : Optional[int]=8 , __magic_name__ : List[Any]=6 , __magic_name__ : Dict=10_24 , __magic_name__ : int=8 , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=True , __magic_name__ : Tuple="relu" , __magic_name__ : Union[str, Any]=2_56 , __magic_name__ : Dict=0.1 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : int=0.0_2 , __magic_name__ : Dict=1.0 , __magic_name__ : Dict=True , __magic_name__ : str=False , __magic_name__ : str="sine" , __magic_name__ : List[Any]="resnet50" , __magic_name__ : List[str]=True , __magic_name__ : Optional[int]=False , __magic_name__ : List[str]=4 , __magic_name__ : List[Any]=4 , __magic_name__ : Optional[Any]=4 , __magic_name__ : List[str]=False , __magic_name__ : Tuple=3_00 , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Optional[int]=1 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Dict=0.2_5 , __magic_name__ : Tuple=False , **__magic_name__ : List[Any] , ) -> Tuple: """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) UpperCAmelCase_ : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase_ : Tuple = backbone_config.get('''model_type''' ) UpperCAmelCase_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Any = config_class.from_dict(__magic_name__ ) UpperCAmelCase_ : Dict = use_timm_backbone UpperCAmelCase_ : List[str] = backbone_config UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : int = num_queries UpperCAmelCase_ : Optional[Any] = max_position_embeddings UpperCAmelCase_ : Dict = d_model UpperCAmelCase_ : Tuple = encoder_ffn_dim UpperCAmelCase_ : List[Any] = encoder_layers UpperCAmelCase_ : Any = encoder_attention_heads UpperCAmelCase_ : List[Any] = decoder_ffn_dim UpperCAmelCase_ : Optional[Any] = decoder_layers UpperCAmelCase_ : Any = decoder_attention_heads UpperCAmelCase_ : Optional[int] = dropout UpperCAmelCase_ : Union[str, Any] = attention_dropout UpperCAmelCase_ : Tuple = activation_dropout UpperCAmelCase_ : Optional[int] = activation_function UpperCAmelCase_ : Dict = init_std UpperCAmelCase_ : int = init_xavier_std UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : List[Any] = auxiliary_loss UpperCAmelCase_ : Optional[int] = position_embedding_type UpperCAmelCase_ : Union[str, Any] = backbone UpperCAmelCase_ : Optional[Any] = use_pretrained_backbone UpperCAmelCase_ : Dict = dilation # deformable attributes UpperCAmelCase_ : Optional[int] = num_feature_levels UpperCAmelCase_ : str = encoder_n_points UpperCAmelCase_ : Tuple = decoder_n_points UpperCAmelCase_ : Dict = two_stage UpperCAmelCase_ : Optional[int] = two_stage_num_proposals UpperCAmelCase_ : Dict = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher UpperCAmelCase_ : Union[str, Any] = class_cost UpperCAmelCase_ : List[str] = bbox_cost UpperCAmelCase_ : Dict = giou_cost # Loss coefficients UpperCAmelCase_ : Tuple = mask_loss_coefficient UpperCAmelCase_ : int = dice_loss_coefficient UpperCAmelCase_ : Optional[Any] = bbox_loss_coefficient UpperCAmelCase_ : Any = giou_loss_coefficient UpperCAmelCase_ : List[Any] = eos_coefficient UpperCAmelCase_ : Dict = focal_alpha UpperCAmelCase_ : int = disable_custom_kernels super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ ) @property def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" return self.encoder_attention_heads @property def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" return self.d_model def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: UpperCAmelCase_ : str = self.backbone_config.to_dict() UpperCAmelCase_ : List[Any] = self.__class__.model_type return output
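A small construction sketch for the configuration defined above; the class is exposed in transformers as DeformableDetrConfig, and the attribute aliases shown are the ones declared in its attribute map:

from transformers import DeformableDetrConfig

config = DeformableDetrConfig(num_queries=300, two_stage=False)
print(config.num_attention_heads)  # alias for encoder_attention_heads
print(config.hidden_size)          # alias for d_model
serialized = config.to_dict()      # nested backbone config is serialized too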
644
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging snake_case_ : Union[str, Any] = logging.get_logger(__name__) class __a (lowerCamelCase ): __a : Tuple = ["pixel_values"] def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None: """simple docstring""" UpperCAmelCase_ : int = do_resize UpperCAmelCase_ : Tuple = do_rescale UpperCAmelCase_ : List[Any] = size_divisor UpperCAmelCase_ : Any = resample super().__init__(**__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ ) # Rounds the height and width down to the closest multiple of size_divisor UpperCAmelCase_ : Dict = height // size_divisor * size_divisor UpperCAmelCase_ : Dict = width // size_divisor * size_divisor UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) return image def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray: """simple docstring""" return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature: """simple docstring""" UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor UpperCAmelCase_ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ ) if not valid_images(__magic_name__ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images] if do_resize: UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images] if do_rescale: UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images] UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images] UpperCAmelCase_ : int = {'''pixel_values''': images} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
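# A quick sanity check of the size-divisor rounding used in `resize` above: both spatial
# dimensions are floored to the nearest multiple of `size_divisor`, so every output is
# divisible by it. Pure arithmetic, no model required (the sample sizes are illustrative):
#
#   size_divisor = 32
#   for height, width in [(480, 640), (500, 643)]:
#       print(height // size_divisor * size_divisor, width // size_divisor * size_divisor)
#   # prints "480 640" twice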
644
1
'''simple docstring''' from __future__ import annotations from typing import Any class __a : def __init__( self : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0 ) -> None: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = row, column UpperCAmelCase_ : Any = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )] def __str__( self : Any ) -> str: """simple docstring""" UpperCAmelCase_ : Dict = F"""Matrix consist of {self.row} rows and {self.column} columns\n""" # Make string identifier UpperCAmelCase_ : Tuple = 0 for row_vector in self.array: for obj in row_vector: UpperCAmelCase_ : Optional[Any] = max(__magic_name__ , len(str(__magic_name__ ) ) ) UpperCAmelCase_ : Union[str, Any] = F"""%{max_element_length}s""" # Make string and return def single_line(__magic_name__ : list[float] ) -> str: nonlocal string_format_identifier UpperCAmelCase_ : Dict = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array ) return s def __repr__( self : Tuple ) -> str: """simple docstring""" return str(self ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : tuple[int, int] ) -> bool: """simple docstring""" if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Optional[Any] , __magic_name__ : tuple[int, int] ) -> Any: """simple docstring""" assert self.validate_indicies(__magic_name__ ) return self.array[loc[0]][loc[1]] def __setitem__( self : str , __magic_name__ : tuple[int, int] , __magic_name__ : float ) -> None: """simple docstring""" assert self.validate_indicies(__magic_name__ ) UpperCAmelCase_ : Optional[int] = value def __add__( self : Any , __magic_name__ : Matrix ) -> Matrix: """simple docstring""" assert isinstance(__magic_name__ , __magic_name__ ) assert self.row == another.row and self.column == another.column # Add UpperCAmelCase_ : Optional[Any] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): UpperCAmelCase_ : str = self[r, c] + another[r, c] return result def __neg__( self : Optional[Any] ) -> Matrix: """simple docstring""" UpperCAmelCase_ : Dict = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): UpperCAmelCase_ : str = -self[r, c] return result def __sub__( self : Dict , __magic_name__ : Matrix ) -> Matrix: """simple docstring""" return self + (-another) def __mul__( self : Tuple , __magic_name__ : int | float | Matrix ) -> Matrix: """simple docstring""" if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication UpperCAmelCase_ : Tuple = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): UpperCAmelCase_ : List[str] = self[r, c] * another return result elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication assert self.column == another.row UpperCAmelCase_ : Dict = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: UpperCAmelCase_ : List[str] = F"""Unsupported type given for another ({type(__magic_name__ )})""" raise TypeError(__magic_name__ ) def UpperCAmelCase__ ( self : List[str] ) -> Matrix: """simple docstring""" UpperCAmelCase_ : 
Tuple = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): UpperCAmelCase_ : List[Any] = self[r, c] return result def UpperCAmelCase__ ( self : Any , __magic_name__ : Matrix , __magic_name__ : Matrix ) -> Any: """simple docstring""" assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ ) assert self.row == self.column == u.row == v.row # the matrix must be square, with as many rows as u and v assert u.column == v.column == 1 # u, v should be column vectors # Calculate UpperCAmelCase_ : str = v.transpose() UpperCAmelCase_ : Dict = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertible return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowerCamelCase_ ( ) -> None: # a^(-1) UpperCAmelCase_ : Any = Matrix(3, 3, 0 ) for i in range(3 ): UpperCAmelCase_ : List[str] = 1 print(F"""a^(-1) is {ainv}""" ) # u, v UpperCAmelCase_ : List[Any] = Matrix(3, 1, 0 ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = 1, 2, -3 UpperCAmelCase_ : Optional[Any] = Matrix(3, 1, 0 ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = 4, -2, 5 print(F"""u is {u}""" ) print(F"""v is {v}""" ) print(F"""uv^T is {u * v.transpose()}""" ) # Sherman Morrison print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" ) def lowerCamelCase_ ( ) -> None: import doctest doctest.testmod() testa()
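# For reference, the update method above implements the Sherman-Morrison formula: with A^(-1)
# stored in `self` and column vectors u, v,
#
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
#
# which matches `self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))`; the update
# is undefined (None is returned) exactly when the denominator 1 + v^T A^(-1) u is zero.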
644
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 10, SCREAMING_SNAKE_CASE__ : int = 22 ) -> int: UpperCAmelCase_ : Optional[int] = range(1, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = range(1, SCREAMING_SNAKE_CASE__ ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(10, 22) = }''')
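# Context for the default bounds above: 10**n always has n + 1 digits, so no base of 10 or
# more can yield an n-digit nth power, and 9**22 already has fewer than 22 digits, so the
# search space is finite. This is Project Euler problem 63; solution(10, 22) is expected
# to return 49.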
644
1
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) snake_case_ : List[Any] = None snake_case_ : Union[str, Any] = { "7B": 1_10_08, "13B": 1_38_24, "30B": 1_79_20, "65B": 2_20_16, "70B": 2_86_72, } snake_case_ : Dict = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Tuple=1, SCREAMING_SNAKE_CASE__ : List[str]=256 ) -> Dict: return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]: with open(SCREAMING_SNAKE_CASE__, '''r''' ) as f: return json.load(SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]: with open(SCREAMING_SNAKE_CASE__, '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> Dict: os.makedirs(SCREAMING_SNAKE_CASE__, exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = os.path.join(SCREAMING_SNAKE_CASE__, '''tmp''' ) os.makedirs(SCREAMING_SNAKE_CASE__, exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = read_json(os.path.join(SCREAMING_SNAKE_CASE__, '''params.json''' ) ) UpperCAmelCase_ : Dict = NUM_SHARDS[model_size] UpperCAmelCase_ : Any = params['''n_layers'''] UpperCAmelCase_ : Union[str, Any] = params['''n_heads'''] UpperCAmelCase_ : Optional[int] = n_heads // num_shards UpperCAmelCase_ : List[str] = params['''dim'''] UpperCAmelCase_ : Tuple = dim // n_heads UpperCAmelCase_ : Optional[Any] = 1_00_00.0 UpperCAmelCase_ : Optional[int] = 1.0 / (base ** (torch.arange(0, SCREAMING_SNAKE_CASE__, 2 ).float() / dims_per_head)) if "n_kv_heads" in params: UpperCAmelCase_ : Tuple = params['''n_kv_heads'''] # for GQA / MQA UpperCAmelCase_ : str = n_heads_per_shard // num_key_value_heads UpperCAmelCase_ : str = dim // num_key_value_heads else: # compatibility with other checkpoints UpperCAmelCase_ : List[Any] = n_heads UpperCAmelCase_ : List[Any] = n_heads_per_shard UpperCAmelCase_ : Tuple = dim # permute for sliced rotary def permute(SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : int=n_heads, SCREAMING_SNAKE_CASE__ : List[Any]=dim, SCREAMING_SNAKE_CASE__ : Dict=dim ): return w.view(SCREAMING_SNAKE_CASE__, dima // n_heads // 2, 2, SCREAMING_SNAKE_CASE__ ).transpose(1, 2 ).reshape(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
UpperCAmelCase_ : int = torch.load(os.path.join(SCREAMING_SNAKE_CASE__, '''consolidated.00.pth''' ), map_location='''cpu''' ) else: # Sharded UpperCAmelCase_ : List[str] = [ torch.load(os.path.join(SCREAMING_SNAKE_CASE__, F"""consolidated.{i:02d}.pth""" ), map_location='''cpu''' ) for i in range(SCREAMING_SNAKE_CASE__ ) ] UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : str = {'''weight_map''': {}} for layer_i in range(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Any = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded UpperCAmelCase_ : Optional[int] = { F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute( loaded[F"""layers.{layer_i}.attention.wq.weight"""] ), F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute( loaded[F"""layers.{layer_i}.attention.wk.weight"""] ), F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""], F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""], F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""], F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""], F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""], F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""], F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, so saving attention_norm and ffn_norm would save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase_ : Optional[int] = { F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][ F"""layers.{layer_i}.attention_norm.weight""" ].clone(), F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][ F"""layers.{layer_i}.ffn_norm.weight""" ].clone(), } UpperCAmelCase_ : Optional[int] = permute( torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ], dim=0, ).reshape(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase_ : Any = permute( torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ], dim=0, ).reshape(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : Any = torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ], dim=0, ).reshape(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = torch.cat( [loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(SCREAMING_SNAKE_CASE__ )], dim=1 ) UpperCAmelCase_ : str = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(SCREAMING_SNAKE_CASE__ )], dim=0 ) UpperCAmelCase_ : Tuple = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(SCREAMING_SNAKE_CASE__ )], dim=1 ) UpperCAmelCase_ : Tuple = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(SCREAMING_SNAKE_CASE__ )], dim=0 ) UpperCAmelCase_ : Optional[int] = inv_freq for k, v in state_dict.items(): UpperCAmelCase_ : Dict = filename param_count += v.numel() torch.save(SCREAMING_SNAKE_CASE__, os.path.join(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase_ : str = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded UpperCAmelCase_ : Optional[Any] = { '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''], '''model.norm.weight''': loaded['''norm.weight'''], '''lm_head.weight''': loaded['''output.weight'''], } else: UpperCAmelCase_ : str = { '''model.norm.weight''': loaded[0]['''norm.weight'''], '''model.embed_tokens.weight''': torch.cat( [loaded[i]['''tok_embeddings.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )], dim=1 ), '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )], dim=0 ), } for k, v in state_dict.items(): UpperCAmelCase_ : List[str] = filename param_count += v.numel() torch.save(SCREAMING_SNAKE_CASE__, os.path.join(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ) # Write configs UpperCAmelCase_ : Any = {'''total_size''': param_count * 2} write_json(SCREAMING_SNAKE_CASE__, os.path.join(SCREAMING_SNAKE_CASE__, '''pytorch_model.bin.index.json''' ) ) UpperCAmelCase_ : str = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1 UpperCAmelCase_ : List[Any] = params['''multiple_of'''] if '''multiple_of''' in params else 256 UpperCAmelCase_ : str = LlamaConfig( hidden_size=SCREAMING_SNAKE_CASE__, intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), num_attention_heads=params['''n_heads'''], 
num_hidden_layers=params['''n_layers'''], rms_norm_eps=params['''norm_eps'''], num_key_value_heads=SCREAMING_SNAKE_CASE__, ) config.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('''Loading the checkpoint in a Llama model.''' ) UpperCAmelCase_ : int = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__, torch_dtype=torch.floataa, low_cpu_mem_usage=SCREAMING_SNAKE_CASE__ ) # Avoid saving this as part of the config. del model.config._name_or_path print('''Saving in the Transformers format.''' ) model.save_pretrained(SCREAMING_SNAKE_CASE__, safe_serialization=SCREAMING_SNAKE_CASE__ ) shutil.rmtree(SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]: # Initialize the tokenizer based on the `spm` model UpperCAmelCase_ : Optional[int] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" ) UpperCAmelCase_ : List[str] = tokenizer_class(SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> str: UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--input_dir''', help='''Location of LLaMA weights, which contains tokenizer.model and model folders''', ) parser.add_argument( '''--model_size''', choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''], ) parser.add_argument( '''--output_dir''', help='''Location to write HF model and tokenizer''', ) parser.add_argument('''--safe_serialization''', type=SCREAMING_SNAKE_CASE__, help='''Whether or not to save using `safetensors`.''' ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, ) UpperCAmelCase_ : Any = os.path.join(args.input_dir, '''tokenizer.model''' ) write_tokenizer(args.output_dir, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
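# Illustrative invocation (the script filename and paths are assumptions; the flags come
# straight from the argument parser above):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
#
# The converted checkpoint can then be loaded with LlamaForCausalLM.from_pretrained("/output/path").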
644
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class __a (lowerCamelCase ): __a : int = "dandelin/vilt-b32-finetuned-vqa" __a : Any = ( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." ) __a : Any = "image_qa" __a : str = AutoProcessor __a : Any = AutoModelForVisualQuestionAnswering __a : List[Any] = ["image", "text"] __a : int = ["text"] def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple: """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple: """simple docstring""" return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" with torch.no_grad(): return self.model(**__magic_name__ ).logits def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
644
1
'''simple docstring''' from __future__ import annotations from typing import Any class __a : def __init__( self : List[Any] , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : List[Any] = num_of_nodes UpperCAmelCase_ : list[list[int]] = [] UpperCAmelCase_ : dict[int, int] = {} def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> None: """simple docstring""" self.m_edges.append([u_node, v_node, weight] ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> int: """simple docstring""" if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : int ) -> None: """simple docstring""" if self.m_component[u_node] != u_node: for k in self.m_component: UpperCAmelCase_ : int = self.find_component(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : int ) -> None: """simple docstring""" if component_size[u_node] <= component_size[v_node]: UpperCAmelCase_ : Dict = v_node component_size[v_node] += component_size[u_node] self.set_component(__magic_name__ ) elif component_size[u_node] >= component_size[v_node]: UpperCAmelCase_ : str = self.find_component(__magic_name__ ) component_size[u_node] += component_size[v_node] self.set_component(__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> None: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCAmelCase_ : Optional[int] = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = edge UpperCAmelCase_ : List[Any] = self.m_component[u] UpperCAmelCase_ : List[Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): UpperCAmelCase_ : Dict = [u, v, w] for edge in minimum_weight_edge: if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = edge UpperCAmelCase_ : int = self.m_component[u] UpperCAmelCase_ : int = self.m_component[v] if u_component != v_component: mst_weight += w self.union(__magic_name__ , __magic_name__ , __magic_name__ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 UpperCAmelCase_ : Optional[Any] = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def lowerCamelCase_ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
644
'''simple docstring''' from collections.abc import Iterable from typing import Any class __a : def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = value UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def __repr__( self : List[str] ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class __a : def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = root def __str__( self : Any ) -> str: """simple docstring""" return str(self.root ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids UpperCAmelCase_ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(__magic_name__ ): # If it is the right children UpperCAmelCase_ : Optional[Any] = new_children else: UpperCAmelCase_ : Optional[int] = new_children else: UpperCAmelCase_ : List[str] = new_children def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.root is None def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None: """simple docstring""" UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase_ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase_ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase_ : List[Any] = parent_node.left else: if parent_node.right is None: UpperCAmelCase_ : List[Any] = new_node break else: UpperCAmelCase_ : Union[str, Any] = parent_node.right UpperCAmelCase_ : Union[str, Any] = parent_node def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None: """simple docstring""" for value in values: self.__insert(__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase_ : str = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right return node def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None UpperCAmelCase_ : Dict = self.root if not self.empty(): while node.right is not None: UpperCAmelCase_ : Any = node.right return node def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: UpperCAmelCase_ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase_ : Union[str, Any] = self.root while node.left is not None: UpperCAmelCase_ : Dict = node.left return node def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__magic_name__ , __magic_name__ ) elif node.left is None: # Has only right children self.__reassign_nodes(__magic_name__ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__magic_name__ , node.left ) else: UpperCAmelCase_ : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase_ : Optional[int] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None: """simple docstring""" if node: self.inorder(__magic_name__ , node.left ) arr.append(node.value ) self.inorder(__magic_name__ , node.right ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int: """simple docstring""" UpperCAmelCase_ : list[int] = [] self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal return arr[k - 1] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]: UpperCAmelCase_ : Any = [] if curr_node is not None: UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def lowerCamelCase_ ( ) -> None: UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase_ : Tuple = BinarySearchTree() for i in testlist: t.insert(SCREAMING_SNAKE_CASE__ ) # Prints all the elements of the list in order traversal print(SCREAMING_SNAKE_CASE__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''', t.get_max().value ) # type: ignore 
print('''Min Value: ''', t.get_min().value ) # type: ignore for i in testlist: t.remove(SCREAMING_SNAKE_CASE__ ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
644
1
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList snake_case_ : Optional[Any] = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"] class __a (lowerCamelCase ): def __init__( self : int , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : int=None , __magic_name__ : List[str]=1 ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = tokenizer UpperCAmelCase_ : str = dataset UpperCAmelCase_ : str = len(__magic_name__ ) if n_tasks is None else n_tasks UpperCAmelCase_ : List[Any] = n_copies def __iter__( self : str ) -> Dict: """simple docstring""" UpperCAmelCase_ : Optional[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) UpperCAmelCase_ : Any = self.tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __a (lowerCamelCase ): def __init__( self : int , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = start_length UpperCAmelCase_ : Dict = eof_strings UpperCAmelCase_ : List[str] = tokenizer def __call__( self : str , __magic_name__ : Tuple , __magic_name__ : Any , **__magic_name__ : Optional[int] ) -> str: """simple docstring""" UpperCAmelCase_ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ : List[str] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__magic_name__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : str = re.split('''(%s)''' % '''|'''.join(SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__ ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Tuple=20, **SCREAMING_SNAKE_CASE__ : Any ) -> List[str]: UpperCAmelCase_ : Dict = defaultdict(SCREAMING_SNAKE_CASE__ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(SCREAMING_SNAKE_CASE__ ) ): with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = batch['''ids'''].shape[-1] UpperCAmelCase_ : Optional[int] = accelerator.unwrap_model(SCREAMING_SNAKE_CASE__ ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ) # each task is generated batch_size times UpperCAmelCase_ : Any = batch['''task_id'''].repeat(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = accelerator.pad_across_processes( SCREAMING_SNAKE_CASE__, dim=1, pad_index=tokenizer.pad_token_id ) 
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ : Any = generated_tokens.cpu().numpy() UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): gen_token_dict[task].append(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: UpperCAmelCase_ : int = tokenizer.decode(SCREAMING_SNAKE_CASE__, skip_special_tokens=SCREAMING_SNAKE_CASE__, clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) code_gens[task].append(remove_last_block(SCREAMING_SNAKE_CASE__ ) ) return code_gens def lowerCamelCase_ ( ) -> Tuple: # Setup configuration UpperCAmelCase_ : Optional[Any] = HfArgumentParser(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[str] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ : Union[str, Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ : Any = '''false''' if args.num_workers is None: UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ : Optional[Any] = Accelerator() set_seed(args.seed, device_specific=SCREAMING_SNAKE_CASE__ ) # Load model and tokenizer UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ : int = tokenizer.eos_token UpperCAmelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ : Any = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )] ), } # Load evaluation dataset and metric UpperCAmelCase_ : Optional[Any] = load_dataset('''openai_humaneval''' ) UpperCAmelCase_ : List[Any] = load_metric('''code_eval''' ) UpperCAmelCase_ : Tuple = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) UpperCAmelCase_ : Union[str, Any] = args.n_samples // args.batch_size UpperCAmelCase_ : Any = TokenizedDataset(SCREAMING_SNAKE_CASE__, human_eval['''test'''], n_copies=SCREAMING_SNAKE_CASE__, n_tasks=SCREAMING_SNAKE_CASE__ ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ : List[str] = DataLoader(SCREAMING_SNAKE_CASE__, batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ : Tuple = code_eval_metric.compute(references=[''''''], predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.prepare(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[int] = complete_code( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, n_tasks=SCREAMING_SNAKE_CASE__, batch_size=args.batch_size, **SCREAMING_SNAKE_CASE__, ) if accelerator.is_main_process: UpperCAmelCase_ : Optional[Any] = [] for task in tqdm(range(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ : Any = human_eval['''test'''][task]['''test'''] UpperCAmelCase_ : List[Any] = F"""check({human_eval["test"][task]["entry_point"]})""" references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = code_eval_metric.compute( references=SCREAMING_SNAKE_CASE__, predictions=SCREAMING_SNAKE_CASE__, num_workers=args.num_workers ) print(F"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file, '''w''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
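# Illustrative launch command (the script filename and flag values are assumptions; the flag
# names correspond to the HumanEvalArguments fields referenced in the script above):
#
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot --do_sample True \
#       --temperature 0.2 --top_p 0.95 --n_samples 200 --HF_ALLOW_CODE_EVAL 1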
644
'''simple docstring''' import sys import turtle def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float] ) -> tuple[float, float]: return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : int, ) -> None: my_pen.up() my_pen.goto(vertexa[0], vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0], vertexa[1] ) my_pen.goto(vertexa[0], vertexa[1] ) my_pen.goto(vertexa[0], vertexa[1] ) if depth == 0: return triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 ) triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 ) triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( "Correct format for using this script: " "python fractals.py <int:depth_for_fractal>" ) snake_case_ : Any = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("red") snake_case_ : Tuple = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
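# Example run using the usage string above (depth 4; the recursion fans out threefold per
# level, so the number of smallest triangles drawn grows as 3**depth):
#
#   python fractals.py 4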
644
1
'''simple docstring''' import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin snake_case_ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model") snake_case_ : List[str] = get_tests_dir("fixtures/test_sentencepiece_bpe.model") snake_case_ : List[str] = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class __a (lowerCamelCase , unittest.TestCase ): __a : str = CamembertTokenizer __a : Optional[int] = CamembertTokenizerFast __a : List[Any] = True __a : Union[str, Any] = True def UpperCAmelCase__ ( self : int ) -> Tuple: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : Optional[Any] = CamembertTokenizer(__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" UpperCAmelCase_ : int = '''<pad>''' UpperCAmelCase_ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__magic_name__ ) , 10_04 ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_05 ) def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = CamembertTokenizer(__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[int] = '''I was born in 92000, and this is falsé.''' UpperCAmelCase_ : Any = tokenizer.encode(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) UpperCAmelCase_ : List[str] = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) # <unk> tokens are not the same for `rust` than for `slow`. 
# Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) UpperCAmelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(__magic_name__ ) UpperCAmelCase_ : Any = rust_tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" if not self.test_rust_tokenizer: return UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase_ : List[Any] = '''I was born in 92000, and this is falsé.''' UpperCAmelCase_ : int = tokenizer.tokenize(__magic_name__ ) UpperCAmelCase_ : List[Any] = rust_tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) UpperCAmelCase_ : Any = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : int = tokenizer.encode(__magic_name__ ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) @slow def UpperCAmelCase__ ( self : Any ) -> str: """simple docstring""" # fmt: off UpperCAmelCase_ : str = {'''input_ids''': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. UpperCAmelCase_ : List[Any] = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=__magic_name__ , )
644
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : List[str] = False class __a (unittest.TestCase ): pass @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = generator.manual_seed(0 ) UpperCAmelCase_ : Dict = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077''' UpperCAmelCase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe.dual_guided( prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.text_to_image( prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 
0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
644
1
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 1000 ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = 1, 1 UpperCAmelCase_ : List[str] = [] for i in range(1, n + 1 ): UpperCAmelCase_ : int = prev_numerator + 2 * prev_denominator UpperCAmelCase_ : Any = prev_numerator + prev_denominator if len(str(SCREAMING_SNAKE_CASE__ ) ) > len(str(SCREAMING_SNAKE_CASE__ ) ): result.append(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = numerator UpperCAmelCase_ : Optional[Any] = denominator return len(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": print(f'''{solution() = }''')
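# Each step advances the continued-fraction expansion of sqrt(2): from p/q the next fraction
# is (p + 2q) / (p + q), exactly the two assignments above. This is Project Euler problem 57;
# solution() with the default 1000 expansions is expected to return 153.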
644
'''simple docstring''' snake_case_ : int = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
644
1
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case_ : Tuple = { "configuration_autoformer": [ "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "AutoformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[int] = [ "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "AutoformerForPrediction", "AutoformerModel", "AutoformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys snake_case_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
644
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet UpperCAmelCase_ : Dict = KarrasVeScheduler() UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256''' UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = KarrasVeScheduler() UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
644
1
'''simple docstring''' from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline snake_case_ : Tuple = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase ) class __a (lowerCamelCase ): def __init__( self : Optional[Any] , **__magic_name__ : List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__(**__magic_name__ ) if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" ) # No specific FOR_XXX available yet def __call__( self : Union[str, Any] , __magic_name__ : Union[np.ndarray, bytes, str] , **__magic_name__ : Any ) -> Any: """simple docstring""" return super().__call__(__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : str , **__magic_name__ : List[str] ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = {} if "candidate_labels" in kwargs: UpperCAmelCase_ : Optional[int] = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: UpperCAmelCase_ : Union[str, Any] = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : int=None , __magic_name__ : Optional[Any]="This is a sound of {}." ) -> str: """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): if audio.startswith('''http://''' ) or audio.startswith('''https://''' ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png UpperCAmelCase_ : Optional[int] = requests.get(__magic_name__ ).content else: with open(__magic_name__ , '''rb''' ) as f: UpperCAmelCase_ : Optional[int] = f.read() if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase_ : str = ffmpeg_read(__magic_name__ , self.feature_extractor.sampling_rate ) if not isinstance(__magic_name__ , np.ndarray ): raise ValueError('''We expect a numpy ndarray as input''' ) if len(audio.shape ) != 1: raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' ) UpperCAmelCase_ : Union[str, Any] = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' ) UpperCAmelCase_ : List[Any] = candidate_labels UpperCAmelCase_ : Dict = [hypothesis_template.format(__magic_name__ ) for x in candidate_labels] UpperCAmelCase_ : Optional[int] = self.tokenizer(__magic_name__ , return_tensors=self.framework , padding=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = [text_inputs] return inputs def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Optional[Any] = model_inputs.pop('''candidate_labels''' ) UpperCAmelCase_ : Dict = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __magic_name__ ): UpperCAmelCase_ : str = text_inputs[0] else: # Batching case. 
UpperCAmelCase_ : List[Any] = text_inputs[0][0] UpperCAmelCase_ : Optional[int] = self.model(**__magic_name__ , **__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_audio, } return model_outputs def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> str: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = model_outputs.pop('''candidate_labels''' ) UpperCAmelCase_ : Optional[int] = model_outputs['''logits'''][0] if self.framework == "pt": UpperCAmelCase_ : int = logits.softmax(dim=0 ) UpperCAmelCase_ : Tuple = probs.tolist() else: raise ValueError('''`tf` framework not supported.''' ) UpperCAmelCase_ : Dict = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__magic_name__ , __magic_name__ ) , key=lambda __magic_name__ : -x[0] ) ] return result
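# A hypothetical end-to-end use of this pipeline through the factory function (the checkpoint
# name follows the transformers documentation and is illustrative, not required):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])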
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class __a (lowerCamelCase ): __a : List[Any] = "openai/whisper-base" __a : Optional[Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __a : Any = "transcriber" __a : str = WhisperProcessor __a : List[Any] = WhisperForConditionalGeneration __a : int = ["audio"] __a : Optional[Any] = ["text"] def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]: """simple docstring""" return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple: """simple docstring""" return self.model.generate(inputs=__magic_name__ ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str: """simple docstring""" return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
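# Minimal, hedged sketch of calling the transcription tool defined above. The
# base PipelineTool.__call__ chains the encode/forward/decode methods and loads
# the Whisper checkpoint lazily on first use; the anonymized class name `__a`
# and the dummy audio array are placeholders for illustration only.
import numpy as np

tool = __a()
transcript = tool(np.zeros(16_000, dtype=np.float32))  # one second of silence at 16 kHz
print(transcript)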
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


snake_case_ : Optional[Any] = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the modeling exports as a key of the import structure instead of
    # overwriting it, so the lazy module below can resolve every symbol.
    snake_case_["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], snake_case_, module_spec=__spec__)
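# Illustrative use of the lazily exported classes above. The checkpoint name
# "facebook/m2m100_418M" is an assumption for the sketch, not part of this file.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

tokenizer.src_lang = "fr"
encoded = tokenizer("La vie est belle.", return_tensors="pt")
# Force the decoder to start in the target language.
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))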
'''simple docstring'''

def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input('''Enter two integers separated by comma (,): ''').split(''',''')
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            F"""greatest_common_divisor({num_a}, {num_b}) = """
            F"""{greatest_common_divisor(num_a, num_b)}"""
        )
        print(F"""By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}""")
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''')


if __name__ == "__main__":
    main()
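# Quick demonstration of the two GCD variants above (recursive Euclid vs. iterative).
# Euclid's steps for (24, 40): gcd(24, 40) -> gcd(16, 24) -> gcd(8, 16) -> gcd(0, 8) = 8.
assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8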
'''simple docstring'''

def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = F"""Expected string as input, found {type(input_str)}"""
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = F"""Expected boolean as use_pascal parameter, found {type(use_pascal)}"""
        raise ValueError(msg)

    words = input_str.split('''_''')
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '''''' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
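# Examples for the converter above: camelCase by default, PascalCase with use_pascal=True.
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"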
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str: """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : List[str] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : List[str] = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : List[str] = scope UpperCAmelCase_ : List[str] = range_bbox def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : List[str] = bbox[i, j, 3] UpperCAmelCase_ : Dict = bbox[i, j, 1] UpperCAmelCase_ : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[str] = bbox[i, j, 2] UpperCAmelCase_ : Tuple = bbox[i, j, 0] UpperCAmelCase_ : Union[str, Any] = t UpperCAmelCase_ : int = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) 
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, 
'''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Tuple = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __a : Any = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __a : Union[str, Any] = False __a : int = False def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str: """simple docstring""" return True def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = LiltModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : Tuple = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_torch @slow class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ ) UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ ) UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ ) UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] ) UpperCAmelCase_ : List[str] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , ) self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __a : def __init__( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Any=True , __magic_name__ : Any=True , __magic_name__ : List[str]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[int]=99 , __magic_name__ : Any=32 , __magic_name__ : Tuple=2 , __magic_name__ : List[str]=4 , __magic_name__ : Dict=37 , __magic_name__ : Tuple="gelu" , __magic_name__ : int=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Any=16 , __magic_name__ : Dict=2 , __magic_name__ : str=0.0_2 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Optional[Any]=None , __magic_name__ : Dict=0 , ) -> Dict: """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : List[str] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : Optional[Any] = use_token_type_ids UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : Dict = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : List[Any] = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Any = type_sequence_label_size UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : Union[str, Any] = scope UpperCAmelCase_ : Optional[Any] = projection_dim def UpperCAmelCase__ ( self : Dict ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : int = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py UpperCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Any = None if self.use_token_type_ids: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Any = None UpperCAmelCase_ : Optional[int] = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) UpperCAmelCase_ : Union[str, Any] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Union[str, Any] ) -> str: """simple docstring""" UpperCAmelCase_ : Any = TFDPRContextEncoder(config=__magic_name__ ) UpperCAmelCase_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : int ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = TFDPRQuestionEncoder(config=__magic_name__ ) UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : int = model(__magic_name__ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = TFDPRReader(config=__magic_name__ ) UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Dict = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase_ : int = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Union[str, Any] = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) __a : Dict = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {} __a : str = False __a : Optional[int] = False __a : List[str] = False __a : Optional[Any] = False __a : List[str] = False def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = TFDPRModelTester(self ) 
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[str] = TFDPRContextEncoder.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = TFDPRContextEncoder.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Dict = TFDPRQuestionEncoder.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Union[str, Any] = TFDPRReader.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_tf class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Tuple = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) UpperCAmelCase_ : Tuple = tf.constant( [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP] UpperCAmelCase_ : Tuple = model(__magic_name__ )[0] # embedding shape = (1, 768) # compare the actual values for a slice. UpperCAmelCase_ : Optional[Any] = tf.constant( [ [ 0.0_3_2_3_6_2_5_3, 0.1_2_7_5_3_3_3_5, 0.1_6_8_1_8_5_0_9, 0.0_0_2_7_9_7_8_6, 0.3_8_9_6_9_3_3, 0.2_4_2_6_4_9_4_5, 0.2_1_7_8_9_7_1, -0.0_2_3_3_5_2_2_7, -0.0_8_4_8_1_9_5_9, -0.1_4_3_2_4_1_1_7, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : int = "▁" snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} snake_case_ : int = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } snake_case_ : Optional[Any] = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } snake_case_ : Dict = { "ernie-m-base": 5_14, "ernie-m-large": 5_14, } snake_case_ : Any = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class __a (lowerCamelCase ): __a : List[str] = ["input_ids"] __a : Union[str, Any] = VOCAB_FILES_NAMES __a : Tuple = PRETRAINED_INIT_CONFIGURATION __a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __a : Union[str, Any] = RESOURCE_FILES_NAMES def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) UpperCAmelCase_ : Optional[Any] = do_lower_case UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ ) else: UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )} UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()} def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any: """simple docstring""" if text is None: return None UpperCAmelCase_ : str = self.tokenize(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', [] for i, ch in enumerate(__magic_name__ ): if ch in self.SP_CHAR_MAPPING: UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ ) else: UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ ) if self.is_whitespace(__magic_name__ ): continue normalized_text += ch char_mapping.extend([i] * len(__magic_name__ ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0 if self.do_lower_case: UpperCAmelCase_ : Optional[int] = text.lower() for token in split_tokens: if token[:1] == "▁": UpperCAmelCase_ : Tuple = token[1:] UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) UpperCAmelCase_ : int = end return token_mapping @property def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" return len(self.vocab ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.__dict__.copy() UpperCAmelCase_ : Optional[Any] = None return state def __setstate__( self : str , __magic_name__ : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]: """simple docstring""" if self.sp_model_kwargs.get('''enable_sampling''' ) is True: UpperCAmelCase_ : Dict = True if self.sp_model_kwargs.get('''alpha''' ) is not None: UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' 
) if not enable_sampling: UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ ) else: UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : List[Any] = [] for pi, piece in enumerate(__magic_name__ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0: new_pieces.append(__magic_name__ ) continue else: continue UpperCAmelCase_ : List[str] = 0 for i, chunk in enumerate(__magic_name__ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__magic_name__ ) UpperCAmelCase_ : List[Any] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : List[str] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : str = i if len(__magic_name__ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]: """simple docstring""" return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.reverse_vocab.get(__magic_name__ , self.unk_token ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id] UpperCAmelCase_ : List[Any] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ 
: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(__magic_name__ ) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(__magic_name__ ) == 1:
            UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
            if cat == "Zs":
                return True
        return False

    def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = {}
        with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(__magic_name__ ):
                UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' )
                UpperCAmelCase_ : Dict = int(__magic_name__ )
        return token_to_idx

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        UpperCAmelCase_ : Union[str, Any] = 0
        if os.path.isdir(__magic_name__ ):
            UpperCAmelCase_ : Any = os.path.join(
                __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    UpperCAmelCase_ : Dict = token_index
                writer.write(token + '''\n''' )
                index += 1
        UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' )
        with open(__magic_name__ , '''wb''' ) as fi:
            UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
            fi.write(__magic_name__ )
        return (vocab_file,)
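# Hedged usage sketch for the tokenizer above. The checkpoint identifier
# "susnato/ernie-m-base_pytorch" is taken from the vocab maps in this file but is
# still an assumption for the example; sentencepiece must be installed.
from transformers import ErnieMTokenizer

tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
# Sentence pairs are packed as [CLS] A [SEP] [SEP] B [SEP], matching the
# build_inputs_with_special_tokens logic above; only input_ids are returned
# because model_input_names is ["input_ids"].
encoded = tokenizer("Hello world", "How are you?")
print(encoded["input_ids"])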
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging snake_case_ : Optional[Any] = logging.get_logger(__name__) class __a (lowerCamelCase ): def __init__( self : Optional[int] , __magic_name__ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> str: """simple docstring""" super().__init__() UpperCAmelCase_ : Dict = nn.ModuleList(__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : Union[torch.Tensor, float, int] , __magic_name__ : torch.Tensor , __magic_name__ : List[torch.tensor] , __magic_name__ : List[float] , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[Dict[str, Any]] = None , __magic_name__ : bool = False , __magic_name__ : bool = True , ) -> Union[ControlNetOutput, Tuple]: """simple docstring""" for i, (image, scale, controlnet) in enumerate(zip(__magic_name__ , __magic_name__ , self.nets ) ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = controlnet( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) # merge samples if i == 0: UpperCAmelCase_ , UpperCAmelCase_ : Any = down_samples, mid_sample else: UpperCAmelCase_ : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__magic_name__ , __magic_name__ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, os.PathLike] , __magic_name__ : bool = True , __magic_name__ : Callable = None , __magic_name__ : bool = False , __magic_name__ : Optional[str] = None , ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : List[str] = save_directory for controlnet in self.nets: controlnet.save_pretrained( __magic_name__ , is_main_process=__magic_name__ , save_function=__magic_name__ , safe_serialization=__magic_name__ , variant=__magic_name__ , ) idx += 1 UpperCAmelCase_ : List[Any] = model_path_to_save + F"""_{idx}""" @classmethod def UpperCAmelCase__ ( cls : Dict , __magic_name__ : Optional[Union[str, os.PathLike]] , **__magic_name__ : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : int = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... UpperCAmelCase_ : int = pretrained_model_path while os.path.isdir(__magic_name__ ): UpperCAmelCase_ : Optional[int] = ControlNetModel.from_pretrained(__magic_name__ , **__magic_name__ ) controlnets.append(__magic_name__ ) idx += 1 UpperCAmelCase_ : Union[str, Any] = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__magic_name__ )} controlnets loaded from {pretrained_model_path}.""" ) if len(__magic_name__ ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__magic_name__ )}. 
Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__magic_name__ )
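# Hedged sketch: composing two ControlNets through the wrapper above. The
# checkpoint names are assumptions for illustration. Passing a list of
# controlnets to the pipeline wraps them in the multi-controlnet class defined
# here, so each (image, scale) pair is merged as in its `forward` method.
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")

pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=[controlnet_canny, controlnet_pose]
)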
'''simple docstring'''

def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
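# Worked example for the function above: for -5, bin(-5)[3:] == "101" (length 3),
# abs(-5) - (1 << 3) == -3, bin(-3)[3:] == "11", which is then padded and
# prefixed with "1" to give "1011".
assert twos_complement(-5) == "0b1011"
assert twos_complement(-17) == "0b101111"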
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets snake_case_ : Union[str, Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" snake_case_ : Dict = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" snake_case_ : List[Any] = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a (datasets.Metric ): def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] , ) def UpperCAmelCase__ ( self : str , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Optional[Any]=None , __magic_name__ : Dict=True , __magic_name__ : Dict=False ) -> int: """simple docstring""" if rouge_types is None: UpperCAmelCase_ : Union[str, Any] = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] UpperCAmelCase_ : Optional[Any] = rouge_scorer.RougeScorer(rouge_types=__magic_name__ , use_stemmer=__magic_name__ ) if use_aggregator: UpperCAmelCase_ : List[str] = scoring.BootstrapAggregator() else: UpperCAmelCase_ : str = [] for ref, pred in zip(__magic_name__ , __magic_name__ ): UpperCAmelCase_ : Any = scorer.score(__magic_name__ , __magic_name__ ) if use_aggregator: aggregator.add_scores(__magic_name__ ) else: scores.append(__magic_name__ ) if use_aggregator: UpperCAmelCase_ : List[Any] = aggregator.aggregate() else: UpperCAmelCase_ : Tuple = {} for key in scores[0]: UpperCAmelCase_ : Tuple = [score[key] for score in scores] return result
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(__magic_name__ ) # fails here def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 ) UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 ) UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
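# Hedged sketch of using DisjunctiveConstraint in constrained generation; the
# model choice and word pair are assumptions. Each inner list is one acceptable
# token-id sequence, and beam search must realize exactly one of them. Note that,
# per the subset test above, no alternative may be a prefix of another.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

flexible_phrase = DisjunctiveConstraint(
    [
        tokenizer("scared", add_special_tokens=False).input_ids,
        tokenizer("frightened", add_special_tokens=False).input_ids,
    ]
)
inputs = tokenizer("summarize: The loud noise startled the cat.", return_tensors="pt")
out = model.generate(**inputs, constraints=[flexible_phrase], num_beams=4)
print(tokenizer.batch_decode(out, skip_special_tokens=True))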
'''simple docstring''' import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" debug_launcher(test_script.main ) def UpperCAmelCase__ ( self : Dict ) -> Any: """simple docstring""" debug_launcher(test_ops.main )
'''simple docstring'''

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")

    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    result = model.predict(x_test)
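# Hedged evaluation sketch for the script above, assumed to run in the same scope.
# Because the targets were MinMax-scaled, predictions should be mapped back to the
# original range before inspection. The scaler is refit here as an assumption,
# since the script fits MinMaxScaler inline without keeping a reference to it.
#
#     scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(len_data, 1))
#     rescaled_predictions = scaler.inverse_transform(result.reshape(-1, 1))
#     rescaled_truth = scaler.inverse_transform(y_test.reshape(-1, 1))
#     print("MAE:", np.mean(np.abs(rescaled_predictions - rescaled_truth)))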
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) snake_case_ : str = logging.getLogger(__name__) def lowerCamelCase_ ( ) -> Tuple: UpperCAmelCase_ : int = argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''', type=SCREAMING_SNAKE_CASE__, default='''data/dump.txt''', help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''', type=SCREAMING_SNAKE_CASE__, default='''bert''', choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-uncased''', help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''', type=SCREAMING_SNAKE_CASE__, default='''data/dump''', help='''The dump file prefix.''' ) UpperCAmelCase_ : int = parser.parse_args() logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" ) if args.tokenizer_type == "bert": UpperCAmelCase_ : Any = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase_ : Tuple = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` UpperCAmelCase_ : Optional[int] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCAmelCase_ : Optional[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase_ : str = tokenizer.special_tokens_map['''cls_token'''] # `<s>` UpperCAmelCase_ : Any = tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": UpperCAmelCase_ : int = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase_ : Optional[Any] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` UpperCAmelCase_ : List[Any] = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(F"""Loading text from {args.file_path}""" ) with open(args.file_path, '''r''', encoding='''utf8''' ) as fp: UpperCAmelCase_ : List[str] = fp.readlines() logger.info('''Start encoding''' ) logger.info(F"""{len(SCREAMING_SNAKE_CASE__ )} examples to process.""" ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : Tuple = 0 UpperCAmelCase_ : List[str] = 10000 UpperCAmelCase_ : Any = time.time() for text in data: UpperCAmelCase_ : Optional[int] = F"""{bos} {text.strip()} {sep}""" UpperCAmelCase_ : str = tokenizer.encode(SCREAMING_SNAKE_CASE__, add_special_tokens=SCREAMING_SNAKE_CASE__ ) rslt.append(SCREAMING_SNAKE_CASE__ ) iter += 1 if iter % interval == 0: UpperCAmelCase_ : int = time.time() logger.info(F"""{iter} examples processed. 
- {(end-start):.2f}s/{interval}expl""" )
            UpperCAmelCase_ : Tuple = time.time()

    logger.info('''Finished binarization''' )
    logger.info(F"""{len(data )} examples processed.""" )

    UpperCAmelCase_ : int = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    UpperCAmelCase_ : Optional[Any] = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        UpperCAmelCase_ : Tuple = [np.uint16(d ) for d in rslt]
    else:
        UpperCAmelCase_ : List[str] = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )

    logger.info(F"""Dump to {dp_file}""" )
    with open(dp_file, '''wb''' ) as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL )


if __name__ == "__main__":
    lowerCamelCase_()
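# Example invocation of the preprocessing script above (the script file name is
# assumed), using the argparse flags it defines:
#
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text
#
# This writes `data/binarized_text.bert-base-uncased.pickle`: a pickled list of
# token-id arrays, one per input line, stored as uint16 when the tokenizer's
# vocabulary fits in 16 bits and int32 otherwise.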
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1" snake_case_ : Dict = "CompVis/stable-diffusion-v1-2" snake_case_ : Any = "CompVis/stable-diffusion-v1-3" snake_case_ : str = "CompVis/stable-diffusion-v1-4" class __a (lowerCamelCase ): def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str: """simple docstring""" super()._init_() UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Tuple = StableDiffusionPipeline( vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )} def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" self.enable_attention_slicing(__magic_name__ ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , 
callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[Any] , 
__magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(__magic_name__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase_ : int = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
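A hedged usage sketch for the comparison pipeline above; the custom_pipeline identifier and the assumption that the final no-grad method is exposed as __call__ are mine, not guaranteed by the file itself.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",  # assumed community id
    torch_dtype=torch.float16,
).to("cuda")
pipe.enable_attention_slicing()  # "auto" slices at attention_head_dim // 2, as above
result = pipe("an astronaut riding a horse")
# result.images should hold one sample per checkpoint: v1-1 through v1-4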
644
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case_ : Dict = { "configuration_nllb_moe": [ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = [ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
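The _LazyModule indirection above means importing the package is nearly free: the heavy torch-backed module is pulled in only on first attribute access. A small sketch of that behaviour using only the standard library:
import importlib

mod = importlib.import_module("transformers.models.nllb_moe")  # cheap
cfg_cls = getattr(mod, "NllbMoeConfig")  # first access triggers the real import
print(cfg_cls.__name__)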
644
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler snake_case_ : Optional[int] = 16 snake_case_ : Tuple = 32 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict: UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ : Tuple = datasets.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE__ : str ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' ) # Instantiate dataloaders. UpperCAmelCase_ : str = DataLoader( tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = DataLoader( tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any: model.eval() UpperCAmelCase_ : List[str] = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : List[str] = metric.compute() return eval_metric["accuracy"] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple: # Initialize accelerator UpperCAmelCase_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ : int = config['''lr'''] UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] ) UpperCAmelCase_ : Optional[int] = int(config['''seed'''] ) UpperCAmelCase_ : List[str] = int(config['''batch_size'''] ) UpperCAmelCase_ : Optional[int] = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ ) # Instantiate optimizer UpperCAmelCase_ : str = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, ) else: UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' ) UpperCAmelCase_ : Optional[Any] = num_epochs if args.partial_train_epoch is not None: UpperCAmelCase_ : List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1] UpperCAmelCase_ : int = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1 UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f: UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCAmelCase_ : int = {} for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = outputs.loss UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCAmelCase_ : Tuple = F"""epoch_{epoch}""" UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ ) accelerator.save_state(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = accuracy UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0] UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr'''] UpperCAmelCase_ : Tuple = epoch UpperCAmelCase_ : Dict = overall_step accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> List[str]: UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking 
peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, ) parser.add_argument( '''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', ) parser.add_argument( '''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', ) parser.add_argument( '''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', ) parser.add_argument( '''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', ) UpperCAmelCase_ : Optional[int] = parser.parse_args() UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
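A hedged invocation sketch for the script above; the file name test_checkpointing.py is assumed, but the flags match the argument parser it defines.
# First run: train for two epochs and save one checkpoint folder per epoch.
#   accelerate launch test_checkpointing.py \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./ckpts --num_epochs 2
# Second run: resume from the first checkpoint; the script reloads state,
# re-evaluates, and asserts the stored accuracy and learning rates match.
#   accelerate launch test_checkpointing.py \
#       --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0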
644
1
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 snake_case_ : List[str] = { # 1536-bit 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 2048-bit 14: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 3072-bit 15: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 4096-bit 16: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 6144-bit 17: { "prime": int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 8192-bit 18: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, } class __a : def __init__( self : Any , __magic_name__ : int = 14 ) -> None: """simple docstring""" if group not in primes: raise ValueError('''Unsupported Group''' ) UpperCAmelCase_ : Optional[Any] = primes[group]['''prime'''] UpperCAmelCase_ : str = primes[group]['''generator'''] UpperCAmelCase_ : Union[str, Any] = int(hexlify(urandom(32 ) ) , base=16 ) def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" return hex(self.__private_key )[2:] def UpperCAmelCase__ ( self : Any ) -> str: """simple docstring""" UpperCAmelCase_ : Any = pow(self.generator , self.__private_key , self.prime ) return hex(__magic_name__ )[2:] def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : int ) -> bool: """simple docstring""" # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(__magic_name__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> str: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = int(__magic_name__ , base=16 ) if not self.is_valid_public_key(__magic_name__ ): raise ValueError('''Invalid public key''' ) UpperCAmelCase_ : Tuple = pow(__magic_name__ , self.__private_key , self.prime ) return shaaaa(str(__magic_name__ ).encode() ).hexdigest() @staticmethod def UpperCAmelCase__ ( __magic_name__ : int , __magic_name__ : int ) -> bool: """simple docstring""" # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(__magic_name__ , (prime - 1) // 2 , __magic_name__ ) == 1 ) @staticmethod def UpperCAmelCase__ ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int = 14 ) -> str: """simple docstring""" UpperCAmelCase_ : str = int(__magic_name__ , base=16 ) UpperCAmelCase_ : Union[str, Any] = int(__magic_name__ , base=16 ) UpperCAmelCase_ : Any = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(__magic_name__ , __magic_name__ ): raise ValueError('''Invalid public key''' ) UpperCAmelCase_ : List[Any] = pow(__magic_name__ , __magic_name__ , __magic_name__ ) return shaaaa(str(__magic_name__ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
644
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]: UpperCAmelCase_ : int = [] if len(SCREAMING_SNAKE_CASE__ ) == 1: return [nums.copy()] for _ in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ : List[Any] = nums.pop(0 ) UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ ) for perm in permutations: perm.append(SCREAMING_SNAKE_CASE__ ) result.extend(SCREAMING_SNAKE_CASE__ ) nums.append(SCREAMING_SNAKE_CASE__ ) return result def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): if start == len(SCREAMING_SNAKE_CASE__ ) - 1: output.append(nums[:] ) else: for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start] backtrack(start + 1 ) UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack UpperCAmelCase_ : Optional[int] = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function snake_case_ : Tuple = permutea([1, 2, 3]) print(res) doctest.testmod()
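As a reference check (hedged: this uses the standard library rather than the functions above), both approaches enumerate all n! orderings of the input, so the cost is O(n * n!):
from itertools import permutations

expected = [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
assert sorted(map(list, permutations([1, 2, 3]))) == expected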
644
1
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str: """simple docstring""" UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Any = use_input_mask UpperCAmelCase_ : List[str] = use_token_type_ids UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : Tuple = scope def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None if self.use_token_type_ids: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: 
"""simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # create attention mask UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) UpperCAmelCase_ : Any = self.seq_length // 2 UpperCAmelCase_ : Tuple = 0 # first forward pass UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1 UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCAmelCase_ : str = random_other_next_tokens # append to next input_ids and attn_mask UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : int = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , ) # get two different outputs UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] # select random slice UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , 
output_from_past.shape[-1] ).item() UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach() UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval() UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) # first forward pass UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[ '''last_hidden_state''' ] # select random slice UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ ) model.to(__magic_name__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str: """simple docstring""" UpperCAmelCase_ : int = BioGptModel(__magic_name__ ) UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 ) def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]: 
"""simple docstring""" UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : str = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else () __a : Union[str, Any] = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) __a : List[str] = False def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = BioGptModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : str = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ ) @slow def UpperCAmelCase__ ( 
self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : Tuple = '''left''' # Define PAD Token = EOS Token = 50256 UpperCAmelCase_ : List[Any] = tokenizer.eos_token UpperCAmelCase_ : List[Any] = model.config.eos_token_id # use different length sentences to test batching UpperCAmelCase_ : Tuple = [ '''Hello, my dog is a little''', '''Today, I''', ] UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ ) UpperCAmelCase_ : Any = model.generate( input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , ) UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ ) UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings ) UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : Tuple = input_dict['''input_ids'''] UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = 3 UpperCAmelCase_ : Optional[int] = '''multi_label_classification''' UpperCAmelCase_ : int = input_dict['''input_ids'''] UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCAmelCase_ : str = model(__magic_name__ )[0] UpperCAmelCase_ : Optional[int] = 4_23_84 UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __magic_name__ ) UpperCAmelCase_ : List[Any] = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ ) UpperCAmelCase_ : Optional[int] = model.generate( **__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , ) UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(__magic_name__ , __magic_name__ )
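The cache tests above reduce to one invariant; a distilled sketch follows (the model and inputs are placeholders, and the attribute names assume standard transformers model outputs):
import torch

def check_cache_consistency(model, input_ids, next_tokens, atol=1e-3):
    # Full pass over the concatenated sequence...
    full = model(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
    # ...must agree with an incremental pass that reuses past_key_values.
    out = model(input_ids, use_cache=True)
    cached = model(next_tokens, past_key_values=out.past_key_values).last_hidden_state
    return torch.allclose(full[:, -next_tokens.shape[1] :], cached, atol=atol)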
644
'''simple docstring''' class __a : def __init__( self : List[Any] , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : Optional[Any] = size UpperCAmelCase_ : Tuple = [0] * size UpperCAmelCase_ : Optional[Any] = [0] * size @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return index | (index + 1) @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : int = value while index < self.size: UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1 if current_left_border == index: UpperCAmelCase_ : List[str] = value else: UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ ) def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" right -= 1 # Because of right is exclusive UpperCAmelCase_ : List[str] = 0 while left <= right: UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ ) if left <= current_left: UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] ) UpperCAmelCase_ : Optional[Any] = current_left else: UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
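The two static helpers above are the classic Fenwick-tree bit tricks; a worked example on index 5 (binary 101):
idx = 5
assert (idx | (idx + 1)) == 7        # get_next: jump to the block ending at 7
assert ((idx & (idx + 1)) - 1) == 3  # get_prev: the block ending at 5 starts at 4, so the previous index is 3
# A range-max query therefore hops right-to-left over O(log n) such blocks.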
644
1
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __a (lowerCamelCase ): def __init__( self : int , __magic_name__ : TransformeraDModel , __magic_name__ : AutoencoderKL , __magic_name__ : KarrasDiffusionSchedulers , __magic_name__ : Optional[Dict[int, str]] = None , ) -> int: """simple docstring""" super().__init__() self.register_modules(transformer=__magic_name__ , vae=__magic_name__ , scheduler=__magic_name__ ) # create a imagenet -> id dictionary for easier use UpperCAmelCase_ : Optional[Any] = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(''',''' ): UpperCAmelCase_ : Optional[int] = int(__magic_name__ ) UpperCAmelCase_ : str = dict(sorted(self.labels.items() ) ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, List[str]] ) -> List[int]: """simple docstring""" if not isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase_ : Optional[Any] = list(__magic_name__ ) for l in label: if l not in self.labels: raise ValueError( F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self : Dict , __magic_name__ : List[int] , __magic_name__ : float = 4.0 , __magic_name__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __magic_name__ : int = 50 , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" UpperCAmelCase_ : int = len(__magic_name__ ) UpperCAmelCase_ : str = self.transformer.config.sample_size UpperCAmelCase_ : Optional[Any] = self.transformer.config.in_channels UpperCAmelCase_ : Dict = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__magic_name__ , device=self.device , dtype=self.transformer.dtype , ) UpperCAmelCase_ : Dict = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents UpperCAmelCase_ : Any = torch.tensor(__magic_name__ , device=self.device ).reshape(-1 ) UpperCAmelCase_ : Optional[int] = torch.tensor([10_00] * batch_size , device=self.device ) UpperCAmelCase_ : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(__magic_name__ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: UpperCAmelCase_ : str = latent_model_input[: len(__magic_name__ ) // 2] UpperCAmelCase_ : List[Any] = torch.cat([half, half] , dim=0 ) UpperCAmelCase_ : Union[str, Any] = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Union[str, Any] = t if not torch.is_tensor(__magic_name__ ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) UpperCAmelCase_ : str = latent_model_input.device.type == '''mps''' if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase_ : str = torch.floataa if is_mps else torch.floataa else: UpperCAmelCase_ : Optional[int] = torch.intaa if is_mps else torch.intaa UpperCAmelCase_ : Optional[int] = torch.tensor([timesteps] , dtype=__magic_name__ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: UpperCAmelCase_ : Optional[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCAmelCase_ : Dict = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output UpperCAmelCase_ : Optional[int] = self.transformer( __magic_name__ , timestep=__magic_name__ , class_labels=__magic_name__ ).sample # perform guidance if guidance_scale > 1: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = torch.split(__magic_name__ , len(__magic_name__ ) // 2 , dim=0 ) UpperCAmelCase_ : Dict = uncond_eps + guidance_scale * (cond_eps - uncond_eps) UpperCAmelCase_ : Dict = torch.cat([half_eps, half_eps] , dim=0 ) UpperCAmelCase_ : str = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = torch.split(__magic_name__ , __magic_name__ , dim=1 ) else: UpperCAmelCase_ : Tuple = noise_pred # compute previous image: x_t -> x_t-1 UpperCAmelCase_ : Any = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample if guidance_scale > 1: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = latent_model_input.chunk(2 , dim=0 ) else: UpperCAmelCase_ : Union[str, Any] = latent_model_input UpperCAmelCase_ : List[str] = 1 / self.vae.config.scaling_factor * latents UpperCAmelCase_ : Any = self.vae.decode(__magic_name__ ).sample UpperCAmelCase_ : Optional[Any] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : List[Any] = self.numpy_to_pil(__magic_name__ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=__magic_name__ )
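The guidance branch above is classifier-free guidance, eps = eps_uncond + s * (eps_cond - eps_uncond); a minimal standalone version with placeholder tensors:
import torch

def cfg(eps_cond, eps_uncond, scale):
    # scale > 1 pushes the prediction further toward the conditional direction
    return eps_uncond + scale * (eps_cond - eps_uncond)

e_c, e_u = torch.randn(1, 4, 8, 8), torch.randn(1, 4, 8, 8)
assert torch.allclose(cfg(e_c, e_u, 1.0), e_c)  # scale 1 recovers the conditional eps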
644
"""Tests for the BioGPT model."""

import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
644
1
"""Tests for the ONNX Stable Diffusion upscale pipeline."""

import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
644
"""Tests for the BlenderbotSmall tokenizer."""

import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
644
1