code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle *data* in place by random swaps and return it.

    Performs ``len(data)`` swaps of two uniformly chosen positions
    (naive variant of Fisher-Yates; the classic algorithm swaps index i
    with a random index <= i while walking the list once).

    :param data: mutable sequence to shuffle; mutated in place.
    :return: the same list object, shuffled.
    """
    for _ in range(len(data)):
        # Pick two random positions and exchange their elements.
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        # Fix: the original assigned to throwaway locals instead of
        # swapping data[a] and data[b], so the list was never shuffled.
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
20
"""Lazy import structure for the Longformer model family.

Exposes configuration, tokenizers and the PyTorch/TensorFlow model classes,
registering only the submodules whose optional backends are installed.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> list of public names it exports; extended below
# depending on which optional backends (tokenizers / torch / tf) are present.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first
    # attribute access. Fix: the original assigned the _LazyModule to a
    # throwaway name and passed an undefined `_import_structure`.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
0
from math import factorial

# Precomputed factorial of every decimal digit "0".."9".
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(number: int) -> int:
    """Return the sum of the factorials of the decimal digits of *number*."""
    return sum(DIGIT_FACTORIAL[d] for d in str(number))


def solution() -> int:
    """Project Euler 34: sum of all numbers that equal the sum of the
    factorials of their digits (1! + 2! are excluded as non-sums).

    Upper bound is 7 * 9! + 1: an 8-digit number cannot exceed 8 * 9!,
    which has only 7 digits, so no larger candidate can exist.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
21
"""Mock download manager that serves files from a dataset's dummy_data.zip."""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    """Drop-in replacement for a DownloadManager used in dataset tests.

    Instead of downloading real data it resolves every URL to a path inside
    the dataset's ``dummy_data.zip`` (fetched locally or from GitHub).
    """

    # Name of the folder inside dummy_data.zip that holds the dummy files.
    dummy_file_name = "dummy_data"
    # Root folder of the dataset scripts checkout (for local dummy data).
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded (lazily resolved caches)
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # Lazily download/extract the dummy data once and cache the path.
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch and extract dummy_data.zip; return the extracted folder path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map real URL(s) to the corresponding path(s) inside the dummy data."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # Dummy archives are pre-extracted; nothing to do.
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [
                    os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls
                ]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative posix path, open file handle) for each file under *path*."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield every regular file under *paths*, skipping hidden/dunder names."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
79
0
"""Count negative numbers in a matrix sorted decreasingly along rows and columns."""


def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 2000 grid sorted decreasingly along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Assert that rows and columns are sorted in non-increasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search a descending-sorted row for the index of its first negative.

    Returns ``len(array)`` when there is no negative number.
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives via per-row binary search, narrowing the bound each row.

    Columns are also sorted, so the first-negative index can only shrink as
    we move down the rows — hence the slice ``grid[i][:bound]``.
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every cell (O(rows * cols) baseline)."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Scan each row, stopping at its first negative (rows sorted descending)."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting strategies on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
22
"""Jaccard similarity between two collections."""


def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return |A ∩ B| / |A ∪ B| for two sets, lists or tuples.

    :param set_a: first collection (set, list or tuple).
    :param set_b: second collection of the same kind as *set_a*.
    :param alternative_union: if True, use len(A) + len(B) as the
        denominator instead of the true union size.
    :return: similarity in [0, 1], or None for unsupported input types.
        (NOTE(review): returning None instead of raising mirrors the
        original behavior; callers relying on it would break otherwise.)
    """
    # Fix: the original def reused one obfuscated identifier for the function
    # and all three parameters — duplicate parameter names are a SyntaxError.
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection_size = len(set_a.intersection(set_b))
        if alternative_union:
            union_size = len(set_a) + len(set_b)
        else:
            union_size = len(set_a.union(set_b))
        return intersection_size / union_size

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union_size = len(set_a) + len(set_b)
            return len(intersection) / union_size
        else:
            # De-duplicated union preserving order of first appearance.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
79
0
"""Utilities to convert model weights between PyTorch and Flax checkpoints."""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load PyTorch checkpoint weights and return them as a Flax state dict."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict


def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename a PT weight key to its Flax counterpart, reshaping if needed."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Check whether `key` or `(model_prefix,) + key` exists in the Flax params."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PT stores (out, in); Flax kernels are (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert an in-memory PyTorch state dict to a nested Flax param dict."""
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (list of .pt files) to Flax params."""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load Flax checkpoint weights from disk into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load an in-memory Flax state dict into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
    # Hoisted out of the loop below: this mapping depends only on pt_model_dict.
    special_pt_names = {}
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    for key in pt_model_dict:
        key_components = key.split(".")
        name = None
        if key_components[-3::2] == ["parametrizations", "original0"]:
            name = key_components[-2] + "_g"
        elif key_components[-3::2] == ["parametrizations", "original1"]:
            name = key_components[-2] + "_v"
        if name is not None:
            key_components = key_components[:-3] + [name]
            key_to_check = ".".join(key_components)
            special_pt_names[key_to_check] = key

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
23
"""Tests for accelerate's KwargsHandler plugins (grad scaler / DDP kwargs)."""

import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    # Minimal handler used to exercise `to_kwargs` below.
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # `to_kwargs` must return only the fields whose value differs from the default.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this file under torchrun so the `__main__` block below runs on every rank.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
79
0
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas (pickled DataFrame) files."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    """Read pickled pandas DataFrames into Arrow tables, one table per file."""

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
24
'''simple docstring''' def __lowercase ( __lowercase = 100 ) -> int: '''simple docstring''' _A = n * (n + 1) * (2 * n + 1) / 6 _A = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
79
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase__ : int = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : int = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Union[str, Any] = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : int = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( 
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys UpperCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
25
"""Pre-train a Transformers model on masked image modeling (SimMIM-style)."""

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect local data folders (if any) into the `data_files` mapping expected by `load_dataset`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generate random boolean patch masks for SimMIM-style pretraining.

    Each call returns a flat tensor over model patches where 1 marks a
    masked patch, built by masking `mask_ratio` of the coarser
    `mask_patch_size` grid and upscaling to the model patch grid.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        # Upscale the coarse mask to one entry per model patch.
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    """Stack per-example pixel values and masks into the batch dict the model expects."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # Parse arguments either from a single JSON file or from the command line.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config: explicit CLI values win over the checkpoint configuration.
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Apply image transforms and attach a fresh random mask per example."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
79
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
"""CANINE model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    """Configuration class for the CANINE model.

    Stores transformer hyper-parameters plus the character-level settings
    (down/upsampling, hashing, local attention stride) specific to CANINE.
    Defaults mirror the `google/canine-s` checkpoint.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,  # private-use Unicode code points serve as BOS/EOS
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,  # Good TPU/XLA memory alignment.
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
0
"""Tests for the DDIMParallelScheduler."""

import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        # Run a complete 10-step denoising loop and return the final sample.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        # Three shifted copies of the deterministic sample stepped as one flat batch.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1

        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta schedule so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta schedule so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
27
'''simple docstring'''


class _UpperCAmelCase:
    """Immutable prefix-sum index over a list of integers.

    Builds ``self.prefix_sum`` with ``prefix_sum[i] == sum(array[: i + 1])``,
    enabling O(1) range-sum queries and an O(n) subarray-sum existence check.

    NOTE(review): the obfuscated original bound every intermediate to ``_A``
    and then read never-bound names (``len_array``, ``array``,
    ``self.prefix_sum``), so ``__init__`` raised ``NameError``, and the first
    query method declared two parameters with the same name (a SyntaxError).
    The intended bindings are restored here. Both query methods keep the
    original name ``lowerCAmelCase`` for interface fidelity, so — exactly as
    in the original — the second definition shadows the first.
    """

    def __init__(self, array: list[int]) -> None:
        """Precompute the prefix sums of *array* (the empty list is handled)."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
            for i in range(1, len_array):
                self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def lowerCAmelCase(self, start: int, end: int) -> int:
        """Return ``sum(array[start : end + 1])`` in O(1).

        NOTE(review): shadowed by the next method of the same name, matching
        the original source's (dead) definition order.
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def lowerCAmelCase(self, target_sum: int) -> bool:
        """Return True iff some contiguous subarray sums to *target_sum*."""
        # Classic prefix-sum trick: such a subarray exists iff some prefix sum
        # minus target_sum equals an earlier prefix sum (or 0 for a prefix).
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
79
0
'''SageMaker model-parallelism integration test for `transformers` GLUE runs.

NOTE(review): identifiers in this file were machine-obfuscated — every local
is bound to the same name (`UpperCamelCase`) while later lines read the
originally intended names (`smp_options`, `estimator`, `train_runtime`, ...),
which are therefore unbound at runtime, and all four test methods share the
name `A`, so only the last definition survives class creation. Flagged here
rather than fixed.
'''
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    # The SageMaker SDK is an optional dependency; only imported when present.
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


# Only runs when TEST_SAGEMAKER=True is exported (release-time CI job).
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True ,
    reason="""Skipping test because should only be run when releasing minor transformers version""" ,
)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue_model_parallelism.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
    ]
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Launches a SageMaker HuggingFace training job and checks runtime/metric KPIs."""

    def A ( self : str ):
        """Setup: copy the GLUE example script into the test path; requires `self.env`."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,
                encoding='utf-8' ,
                check=UpperCamelCase__ ,  # NOTE(review): unbound name (obfuscation artifact) — presumably True
            )
        assert hasattr(self , 'env' )

    def A ( self : Optional[Any] , UpperCamelCase__ : Any ):
        """Build a HuggingFace estimator configured for smdistributed model parallelism.

        NOTE(review): the four locals below are all bound to `UpperCamelCase`,
        while the estimator kwargs read `smp_options`, `mpi_options`,
        `instance_count` and `name_extension` — unbound at runtime.
        """
        UpperCamelCase = {
            'enabled': True,
            'processes_per_host': 8,
        }
        UpperCamelCase = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        UpperCamelCase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        UpperCamelCase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,
            source_dir=self.env.test_path ,
            role=self.env.role ,
            image_uri=self.env.image_uri ,
            base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,
            instance_count=UpperCamelCase__ ,
            instance_type=self.instance_type ,
            debugger_hook_config=UpperCamelCase__ ,
            hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 5_0_0,
            } ,
            metric_definitions=self.env.metric_definitions ,
            distribution=UpperCamelCase__ ,
            py_version='py36' ,
        )

    def A ( self : Optional[int] , UpperCamelCase__ : int ):
        """Export the CloudWatch metrics of a finished training job to CSV.

        NOTE(review): `job_name` in the f-string is unbound (obfuscation artifact).
        """
        TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(1,)] )
    def A ( self : List[str] , UpperCamelCase__ : Tuple ):
        """End-to-end: fit the estimator, pull KPIs, assert thresholds, dump JSON."""
        UpperCamelCase = self.create_estimator(UpperCamelCase__ )  # NOTE(review): no method is bound as `create_estimator` here
        # run training
        estimator.fit()
        # result dataframe
        UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCamelCase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , UpperCamelCase__ )
28
'''Utilities for sharding `gen_kwargs` dicts across parallel jobs.

NOTE(review): machine-obfuscated module — all five functions share the name
`__lowercase` (each `def` shadows the previous one at module level), every
intermediate is bound to `_A` while later lines read the intended names
(`gen_kwargs`, `lists_lengths`, `num_shards`, ...), which are unbound, and
the second function declares two parameters with the same name, which is a
SyntaxError. Flagged here rather than fixed; docstrings describe the
apparent intent inferred from the surviving read-names — confirm against
the un-obfuscated upstream.
'''
from typing import List

import numpy as np


def __lowercase ( __lowercase ) -> int:
    '''Return the common length of all list-valued entries of `gen_kwargs` (min 1).

    Raises RuntimeError when list values have differing lengths, since the
    list to parallelize over would then be ambiguous.
    '''
    _A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    _A = max(lists_lengths.values() , default=0 )
    return max(1 , __lowercase )


def __lowercase ( __lowercase , __lowercase ) -> List[range]:  # NOTE(review): duplicate parameter name -> SyntaxError
    '''Split `num_shards` shard indices into at most `max_num_jobs` contiguous ranges.'''
    _A = []
    for group_idx in range(__lowercase ):
        # Balanced split: early groups receive one extra shard when the count
        # does not divide evenly.
        _A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        _A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        _A = range(__lowercase , start + num_shards_to_add )
        shards_indices_per_group.append(__lowercase )
    return shards_indices_per_group


def __lowercase ( __lowercase , __lowercase ) -> List[dict]:
    '''Split `gen_kwargs` into one kwargs dict per job, sharding only list values.'''
    _A = _number_of_shards_in_gen_kwargs(__lowercase )  # NOTE(review): unbound name after obfuscation
    if num_shards == 1:
        return [dict(__lowercase )]
    else:
        _A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase )  # NOTE(review): unbound name
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(__lowercase , __lowercase )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(__lowercase ) )
        ]


def __lowercase ( __lowercase ) -> dict:
    '''Merge a list of kwargs dicts back into one, concatenating list values.'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , __lowercase )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def __lowercase ( __lowercase , __lowercase ) -> dict:
    '''Return a copy of `gen_kwargs` whose list values are shuffled with `rng`.

    Lists of the same length appear to share one permutation so that parallel
    data sources stay aligned — TODO confirm against upstream.
    '''
    _A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )}
    _A = {}
    for size in list_sizes:
        _A = list(range(__lowercase ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    _A = dict(__lowercase )
    for key, value in shuffled_kwargs.items():
        if isinstance(__lowercase , __lowercase ):
            _A = [value[i] for i in indices_per_size[len(__lowercase )]]
    return shuffled_kwargs
79
0
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


# NOTE(review): machine-obfuscated file — the dataclass base and all three
# fields are named `_snake_case` (the base is unbound here; presumably
# KwargsHandler — TODO confirm), so each field annotation shadows the previous.
@dataclass
class lowerCamelCase (_snake_case ):
    '''Mock kwargs-handler dataclass exercised by the tests below.'''
    _snake_case : int = 0
    _snake_case : bool = False
    _snake_case : float = 3.0


class lowerCamelCase (unittest.TestCase ):
    '''Tests for accelerate kwargs handlers (`to_kwargs`, GradScaler, DDP).

    NOTE(review): this class shadows the dataclass above (both are named
    `lowerCamelCase`), and all three test methods share the name
    `__UpperCAmelCase`, so only the last definition survives class creation.
    Locals bound to `UpperCAmelCase_` are read back under their intended
    names (`scaler_handler`, `accelerator`, `scaler`, `cmd`), which are
    unbound at runtime.
    '''

    def __UpperCAmelCase ( self ) -> Dict:
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=_UpperCamelCase ).to_kwargs() , {'a': 2, 'b': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )

    @require_cuda
    def __UpperCAmelCase ( self ) -> Optional[Any]:
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        UpperCAmelCase_ : Optional[Any] = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
        AcceleratorState._reset_state()
        UpperCAmelCase_ : Union[str, Any] = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        UpperCAmelCase_ : Any = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 10_24.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2_0_0_0 )
        self.assertEqual(scaler._enabled , _UpperCamelCase )

    @require_multi_gpu
    def __UpperCAmelCase ( self ) -> str:
        # Re-launches this very file under torchrun so the __main__ block
        # below runs once per GPU.
        UpperCAmelCase_ : int = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )


if __name__ == "__main__":
    # Worker entry point (run under torchrun): prepare a DDP-wrapped model with
    # custom bucket size and verify the kwargs were forwarded.
    # NOTE(review): every binding below targets `__UpperCAmelCase` while later
    # lines read the intended names (`ddp_scaler`, `accelerator`, `model`,
    # `error_msg`, `observed_bucket_cap_map`) — unbound at runtime.
    __UpperCAmelCase = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    __UpperCAmelCase = Accelerator(kwargs_handlers=[ddp_scaler])
    __UpperCAmelCase = torch.nn.Linear(100, 200)
    __UpperCAmelCase = accelerator.prepare(model)
    # Check the values changed in kwargs
    __UpperCAmelCase = ''
    __UpperCAmelCase = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
29
'''Lazy import scaffold for the `jukebox` model package.

NOTE(review): machine-obfuscated — every module-level binding targets
`lowerCamelCase_`, so the torch branch below overwrites the import-structure
dict, and the final `_LazyModule(...)` call reads `_import_structure`, which
is never bound under that name here. Flagged rather than fixed.
'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it exports (consumed by _LazyModule).
lowerCamelCase_ = {
    '''configuration_jukebox''': [
        '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''JukeboxConfig''',
        '''JukeboxPriorConfig''',
        '''JukeboxVQVAEConfig''',
    ],
    '''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}

# Modeling exports are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''JukeboxModel''',
        '''JukeboxPreTrainedModel''',
        '''JukeboxVQVAE''',
        '''JukeboxPrior''',
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # At runtime the module object is replaced with a lazy proxy.
    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# NOTE(review): the obfuscated original bound all three constants to `__a`
# (each assignment overwriting the previous) and declared the function with
# three parameters all named `snake_case__` — a SyntaxError — while its body
# read `prompt_or_repo_id`, `DEFAULT_PROMPTS_REPO`, `PROMPT_FILES`, `mode`
# and `agent_name`. The intended bindings are restored below; the chat
# template's name (`CHAT_MESSAGE_PROMPT`) is inferred — TODO confirm upstream.

# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '

DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}


def a(prompt_or_repo_id, agent_name, mode="run"):
    """Return a prompt template, downloading it from the Hub when needed.

    Args:
        prompt_or_repo_id: Either a literal prompt (anything containing
            whitespace) or a dataset repo id holding prompt template files.
            ``None`` falls back to ``DEFAULT_PROMPTS_REPO``.
        agent_name: Reported in the download ``user_agent`` header.
        mode: Which template to fetch — ``'run'`` or ``'chat'``
            (keys of ``PROMPT_FILES``).

    Returns:
        The prompt text: the input itself when it is a literal prompt,
        otherwise the content of the cached template file.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s', prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type='dataset', user_agent={'agent': agent_name}
    )
    with open(prompt_file, 'r', encoding='utf-8') as f:
        return f.read()
30
'''VQ-Diffusion pipeline (text-to-image through a VQ-VAE latent transformer).

NOTE(review): machine-obfuscated module — both classes are named
`_UpperCAmelCase` (the pipeline shadows the embeddings class), several `def`s
declare duplicate parameter names (a SyntaxError), every local is bound to
`_A` while later lines read the originally intended names (`prompt_embeds`,
`latents`, `sample`, ...), which are unbound, and the base classes
(`snake_case_`) are unbound too. Flagged here rather than fixed.
'''
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


lowerCamelCase_ = logging.get_logger(__name__)  # pylint: disable=invalid-name


class _UpperCAmelCase ( snake_case_ , snake_case_ ):
    '''Learned (or null) classifier-free-guidance embeddings for VQ-Diffusion.'''

    @register_to_config
    def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ):  # NOTE(review): duplicate parameter names -> SyntaxError
        '''Create zero-initialized learnable embeddings when `learnable` is set.'''
        super().__init__()
        _A = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase )
        else:
            _A = None
        _A = torch.nn.Parameter(__UpperCAmelCase )


class _UpperCAmelCase ( snake_case_ ):
    '''VQ-Diffusion text-to-image pipeline (shadows the embeddings class above).'''

    # NOTE(review): six placeholder class attributes all named `snake_case`
    # (obfuscation collapsed the original typed field declarations).
    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42

    def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ):  # NOTE(review): duplicate parameter names -> SyntaxError
        '''Register the VQ-VAE, CLIP encoder/tokenizer, transformer and scheduler.'''
        super().__init__()
        self.register_modules(
            vqvae=__UpperCAmelCase ,
            transformer=__UpperCAmelCase ,
            text_encoder=__UpperCAmelCase ,
            tokenizer=__UpperCAmelCase ,
            scheduler=__UpperCAmelCase ,
            learned_classifier_free_sampling_embeddings=__UpperCAmelCase ,
        )

    def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ):  # NOTE(review): duplicate parameter names -> SyntaxError
        '''Encode the prompt with CLIP, normalizing and (optionally) pairing it
        with unconditional embeddings for classifier-free guidance.'''
        _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1
        # get prompt text embeddings
        _A = self.tokenizer(
            __UpperCAmelCase ,
            padding="max_length" ,
            max_length=self.tokenizer.model_max_length ,
            return_tensors="pt" ,
        )
        _A = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # Warn and truncate inputs longer than CLIP's maximum context.
            _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}'''
            )
            _A = text_input_ids[:, : self.tokenizer.model_max_length]
        _A = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )

        # duplicate text embeddings for each generation per prompt
        _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                _A = self.learned_classifier_free_sampling_embeddings.embeddings
                _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 )
            else:
                # Encode empty strings as the unconditional branch.
                _A = [""] * batch_size
                _A = text_input_ids.shape[-1]
                _A = self.tokenizer(
                    __UpperCAmelCase ,
                    padding="max_length" ,
                    max_length=__UpperCAmelCase ,
                    truncation=__UpperCAmelCase ,
                    return_tensors="pt" ,
                )
                _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _A = negative_prompt_embeds.shape[1]
            _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 )
            _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _A = torch.cat([negative_prompt_embeds, prompt_embeds] )

        return prompt_embeds

    @torch.no_grad()
    def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ):  # NOTE(review): duplicate parameter names -> SyntaxError
        '''Run the full denoising loop and decode latents to images.'''
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            _A = 1
        elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            _A = len(__UpperCAmelCase )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' )

        _A = batch_size * num_images_per_prompt
        _A = guidance_scale > 1.0
        _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )  # NOTE(review): no method is bound as `_encode_prompt` here

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(__UpperCAmelCase )}.'''
            )

        # get the initial completely masked latents unless the user supplied it
        _A = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # Start from the fully-masked token index.
            _A = self.transformer.num_vector_embeds - 1
            _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f''' {self.transformer.num_vector_embeds - 1} (inclusive).'''
                )
            _A = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device )
        _A = self.scheduler.timesteps.to(self.device )
        _A = latents

        for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
            # expand the sample if we are doing classifier free guidance
            _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample

            if do_classifier_free_guidance:
                _A , _A = model_output.chunk(2 )
                _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase )

            _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase )  # NOTE(review): no method is bound as `truncate` here

            # remove `log(0)`'s (`-inf`s)
            _A = model_output.clamp(-70 )

            # compute the previous noisy sample x_t -> x_t-1
            _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

        # Decode the final latent token indices through the VQ-VAE.
        _A = self.vqvae.config.vq_embed_dim
        _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase )
        _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample

        _A = (image / 2 + 0.5).clamp(0 , 1 )
        _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            _A = self.numpy_to_pil(__UpperCAmelCase )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__UpperCAmelCase )

    def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ):  # NOTE(review): duplicate parameter names -> SyntaxError
        '''Zero out (in log-space) all but the top probabilities whose cumulative
        mass stays below `truncation_rate`, keeping at least the largest one.'''
        _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase )
        _A = torch.exp(__UpperCAmelCase )
        _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase )
        _A = torch.cat((all_true, keep_mask) , dim=1 )
        _A = keep_mask[:, :-1, :]

        # Undo the sort so the mask lines up with the original ordering.
        _A = keep_mask.gather(1 , indices.argsort(1 ) )

        _A = log_p_x_0.clone()
        _A = -torch.inf  # -inf = log(0)

        return rv
79
0
'''Multi-ControlNet wrapper: runs several ControlNets and sums their residuals.

NOTE(review): machine-obfuscated — all three methods share the name `_A`
(only the last survives class creation), `forward`'s parameters are all
named `A` (duplicate parameter names are a SyntaxError), and locals are
bound to `_UpperCAmelCase` while later lines read the intended names
(`self.nets` is never actually assigned, `idx`, `controlnets`, ...).
Flagged here rather than fixed.
'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)


class lowerCamelCase_ ( snake_case__ ):
    '''Container module applying a list of ControlNets jointly.'''

    def __init__( self : Optional[int] , A : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        super().__init__()
        # NOTE(review): intended as `self.nets = nn.ModuleList(...)`; the
        # obfuscated binding goes to a throwaway local instead.
        _UpperCAmelCase : Optional[int] = nn.ModuleList(A )

    def _A ( self : Dict , A : torch.FloatTensor , A : Union[torch.Tensor, float, int] , A : torch.Tensor , A : List[torch.tensor] , A : List[float] , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[Dict[str, Any]] = None , A : bool = False , A : bool = True , ):  # NOTE(review): duplicate parameter names -> SyntaxError
        '''Run every ControlNet and accumulate the down/mid residual samples.'''
        for i, (image, scale, controlnet) in enumerate(zip(A , A , self.nets ) ):
            _UpperCAmelCase , _UpperCAmelCase : str = controlnet(
                A , A , A , A , A , A , A , A , A , A , A ,
            )

            # merge samples
            if i == 0:
                _UpperCAmelCase , _UpperCAmelCase : List[Any] = down_samples, mid_sample
            else:
                # Element-wise sum of residuals across ControlNets.
                _UpperCAmelCase : Optional[int] = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(A , A )
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def _A ( self : List[str] , A : Union[str, os.PathLike] , A : bool = True , A : Callable = None , A : bool = False , A : Optional[str] = None , ):
        '''Save each ControlNet under `<save_directory>`, `<save_directory>_1`, ...'''
        _UpperCAmelCase : str = 0
        _UpperCAmelCase : str = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                A , is_main_process=A , save_function=A , safe_serialization=A , variant=A ,
            )

            idx += 1
            _UpperCAmelCase : Tuple = model_path_to_save + F"""_{idx}"""

    @classmethod
    def _A ( cls : int , A : Optional[Union[str, os.PathLike]] , **A : Tuple ):
        '''Load ControlNets from `<path>`, `<path>_1`, ... until none is found.'''
        _UpperCAmelCase : str = 0
        _UpperCAmelCase : int = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        _UpperCAmelCase : int = pretrained_model_path
        while os.path.isdir(A ):
            _UpperCAmelCase : List[str] = ControlNetModel.from_pretrained(A , **A )
            controlnets.append(A )
            idx += 1
            _UpperCAmelCase : Dict = pretrained_model_path + F"""_{idx}"""

        logger.info(F"""{len(A )} controlnets loaded from {pretrained_model_path}.""" )

        if len(A ) == 0:
            raise ValueError(
                F"""No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}."""
            )

        return cls(A )
31
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> int: '''simple docstring''' _A = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), 
("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = "" else: _A = "vit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' _A = dct.pop(__lowercase ) _A = val def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' _A = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , ) _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 ) _A = False # load original model from timm _A = timm.create_model(__lowercase , pretrained=__lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _A = timm_model.state_dict() if base_model: remove_classification_head_(__lowercase ) _A 
= create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , __lowercase ) _A = "huggingface/label-files" _A = "imagenet-1k-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _A = ViTHybridModel(__lowercase ).eval() else: _A = ViTHybridForImageClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) # create image processor _A = create_transform(**resolve_data_config({} , model=__lowercase ) ) _A = transform.transforms _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _A = ViTHybridImageProcessor( do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _A = prepare_img() _A = transform(__lowercase ).unsqueeze(0 ) _A = processor(__lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowercase , __lowercase ) # verify logits with torch.no_grad(): _A = model(__lowercase ) _A = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _A = timm_model.forward_features(__lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowercase , outputs.pooler_output , atol=1e-3 ) else: _A = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 ) 
print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowercase ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
79
0
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal embeddings of shape ``(len(timesteps), embedding_dim)``.

    Args:
        timesteps: 1-D array of timestep values.
        embedding_dim: size of the output embedding; must be even (half sin, half cos).
        freq_shift: subtracted from the timescale count when spacing frequencies.
        min_timescale / max_timescale: geometric frequency range of the sinusoids.
        flip_sin_to_cos: if True emit ``[cos, sin]`` instead of ``[sin, cos]``.
        scale: multiplier applied to the phase before taking sin/cos.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    # Geometric progression of inverse frequencies between min and max timescale.
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Two-layer SiLU MLP that projects raw timestep embeddings."""

    # Output width of both dense layers.
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps :func:`get_sinusoidal_embeddings` as a flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
32
"""Lazy-import ``__init__`` for the Time Series Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it exposes; consumed by _LazyModule below.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only register the torch-backed modeling module when torch is installed.
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
0
"""simple docstring""" import string from math import logaa def lowercase ( __snake_case : str , __snake_case : str ): lowercase_ : Union[str, Any] = document.translate( str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' ) lowercase_ : str = document_without_punctuation.split(''' ''' ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def lowercase ( __snake_case : str , __snake_case : str ): lowercase_ : Union[str, Any] = corpus.lower().translate( str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with '' lowercase_ : Optional[Any] = corpus_without_punctuation.split('''\n''' ) lowercase_ : Tuple = term.lower() return (len([doc for doc in docs if term in doc] ), len(__snake_case )) def lowercase ( __snake_case : int , __snake_case : int , __snake_case : int=False ): if smoothing: if n == 0: raise ValueError('''log10(0) is undefined.''' ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError('''df must be > 0''' ) elif n == 0: raise ValueError('''log10(0) is undefined.''' ) return round(logaa(n / df ) , 3 ) def lowercase ( __snake_case : int , __snake_case : int ): return round(tf * idf , 3 )
33
"""COMET machine-translation metric for `datasets`, wrapping the `unbabel-comet` package."""
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''

_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

_KWARGS_DESCRIPTION = '''
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric(\'comet\')
    >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    """datasets.Metric implementation backed by the unbabel-comet scorer."""

    def _info(self):
        # Metadata consumed by the `datasets` metric machinery.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Download and load the COMET checkpoint selected by ``config_name``."""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score each (source, hypothesis, reference) triple with the COMET model."""
        if gpus is None:
            # Default to one GPU when CUDA is available, CPU otherwise.
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        # Transpose the column dict into one row-dict per example, as comet expects.
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}


# Backward-compatible alias for the previous (garbled) class name.
_UpperCAmelCase = COMET
79
0
"""Compute pi to arbitrary precision with the Chudnovsky algorithm."""
from decimal import Decimal, getcontext
from math import ceil, factorial


def snake_case_(_a: int) -> str:
    """Return the first *_a* significant digits of pi as a string.

    Uses the Chudnovsky series; each term contributes roughly 14 digits.

    Raises:
        TypeError: if the precision is not an integer.
        ValueError: if the precision is < 1.
    """
    precision = _a
    # Bug fix: the previous code called isinstance(precision, precision),
    # which itself raises TypeError for any valid integer input.
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Bug fix: the Decimal context precision was never set, so all arithmetic
    # ran at the default 28 digits regardless of the requested precision.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # ~14 digits of pi per series term
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    # Bug fix: the partial sum must start from the k=0 linear term (13591409),
    # not from the precision argument.
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        # Bug fix: factorial(k), not factorial(precision), in the multinomial term.
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last (possibly rounded) digit so every returned digit is exact.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    # Bug fix: the demo referenced undefined names `pi` and `n`.
    A = 50
    print(f"""The first {A} digits of pi is: {snake_case_(A)}""")
34
"""In-place slowsort (a deliberately inefficient divide-and-conquer sort)."""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place.

    ``start`` defaults to 0 and ``end`` to the last index. Note: the previous
    version declared three parameters all named ``__lowercase`` (a SyntaxError)
    and recursed on an undefined name.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # Sort each half, bubble the larger of the two maxima to `end`,
    # then re-sort everything except the now-final last element.
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


# Backward-compatible alias for the previous (broken) public name.
__lowercase = slowsort

if __name__ == "__main__":
    from doctest import testmod

    testmod()
79
0
"""Testing utilities for accelerate: env-driven skips, temp-dir test cases, and async subprocess helpers."""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..utils import (
    gather,
    is_bnb_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)


def parse_flag_from_env(key, default=False):
    """Read boolean env var *key*; fall back to *default* when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    """Unconditionally skip a test."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Run only when the RUN_SLOW env flag is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    # FSDP needs torch >= 1.12.0.
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """Skip unless torch >= *version*; usable bare or as ``@require_torch_min_version(version=...)``."""
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    """TestCase with a class-scoped temp dir, emptied before each test when `clear_on_setup`."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """TestCase that resets accelerate's global state singletons after each test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """TestCase helper that starts mocks and registers cleanup for them."""

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    """True when every process holds an identical copy of *tensor*."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    """Holds the return code plus captured stdout/stderr lines of a subprocess."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run *cmd*, streaming output; raise RuntimeError on a non-zero exit code."""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """Run *command* synchronously; optionally return decoded stdout.

    Raises SubprocessCallException (with the child's output) on failure.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
35
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = 
self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that 
outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) 
def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
79
0
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Return True if *pattern* occurs in *text* (Knuth-Morris-Pratt).

    Uses the failure (prefix) function so the scan over *text* never backs up,
    giving O(len(pattern) + len(text)) time.

    NOTE(review): the original defined both functions under the same name ``A``
    (the second shadowed the first) while this module's own self-tests call
    ``kmp`` and ``get_failure_array`` — restored the names the callers use.
    """
    # An empty pattern trivially matches (also avoids pattern[0] IndexError).
    if not pattern:
        return True
    # 1) Construct the failure array for the pattern.
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern.
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # Mismatch after a partial match: fall back just far enough to keep
        # the longest proper prefix that is also a suffix, then retry text[i].
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Return the KMP failure array for *pattern*.

    failure[k] is the length of the longest proper prefix of pattern[: k + 1]
    that is also a suffix of it.
    """
    failure = [0]
    i = 0  # length of the current matched prefix
    j = 1  # position being computed
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Shrink to the next-shorter border and retry the same j.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    texta = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    texta_neg = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, texta) and not kmp(pattern, texta_neg)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
36
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = 
random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { 
"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, 
self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, 
TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : 
List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
_A = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = tf.constant([[4, 10]] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _A = emba(input_ids.shape ) _A = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _A = emba.weight[:3, :5] tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _A = embed_positions([2, 16, 768] )[None, None, :, :] _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, 
-2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _A = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
79
0
"""Pegasus model configuration.

NOTE(review): the original obfuscation gave all 23 ``__init__`` parameters
the same name (a duplicate-argument SyntaxError), shadowed the module-level
logger with the archive map, gave both properties the same name, and stored
every constructor argument into a shadowed local instead of ``self``.
Parameter/attribute names are restored consistently with the
``attribute_map`` this class itself declares.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class lowerCAmelCase_(PretrainedConfig):
    """Configuration for a Pegasus encoder-decoder model.

    Stores the hyper-parameters used to instantiate the model; defaults match
    google/pegasus-large.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        # Alias required by attribute_map.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias required by attribute_map.
        return self.d_model
37
"""GPT-NeoX model configuration.

NOTE(review): the original obfuscation gave all ``__init__`` parameters the
same name (a duplicate-argument SyntaxError), shadowed the module-level
logger with the archive map, stored constructor arguments into shadowed
locals instead of ``self``, and left the ``rope_scaling`` validation
f-strings referring to names (``rope_scaling_type``/``rope_scaling_factor``)
that were never bound. All are restored below; the error message also now
names the ``type`` field the code actually reads.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class _UpperCAmelCase(PretrainedConfig):
    """Configuration for a GPT-NeoX model; defaults match EleutherAI/gpt-neox-20b."""

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the ``rope_scaling`` dict: ``{"type": ..., "factor": ...}``.

        Raises:
            ValueError: if the dict is malformed, the type is not one of
                ``linear``/``dynamic``, or the factor is not a float > 1.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
79
0
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand UpperCAmelCase_ : Dict = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) UpperCAmelCase_ : List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', 
'''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) UpperCAmelCase_ : Tuple = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) UpperCAmelCase_ : Union[str, Any] = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) UpperCAmelCase_ : Optional[int] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]), ('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]), ('''9D 3S 2C 7S 
7C''', False, [9, 7, 7, 3, 2]), ) UpperCAmelCase_ : int = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) UpperCAmelCase_ : str = ( ('''JH AH TH KH QH''', 23), ('''JH 9H TH KH QH''', 22), ('''JC KH JS JD JH''', 21), ('''KH KC 3S 3H 3D''', 20), ('''8C 9C 5C 3C TC''', 19), ('''JS QS 9H TS KH''', 18), ('''7C 7S KH 2H 7H''', 17), ('''3C KH 5D 5S KH''', 16), ('''QH 8H KD JH 8S''', 15), ('''2D 6D 9D TH 7D''', 14), ) def SCREAMING_SNAKE_CASE_ ( ) -> Dict: """simple docstring""" UpperCamelCase , UpperCamelCase :Optional[int] = randrange(len(__magic_name__ ) ), randrange(len(__magic_name__ ) ) UpperCamelCase :List[Any] = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase :Any = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int = 100 ) -> Tuple: """simple docstring""" return (generate_random_hand() for _ in range(__magic_name__ )) @pytest.mark.parametrize("""hand, expected""" , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] , __magic_name__ : int ) -> Any: """simple docstring""" assert PokerHand(__magic_name__ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : str ) -> str: """simple docstring""" assert PokerHand(__magic_name__ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ) -> int: """simple docstring""" UpperCamelCase :str = PokerHand(__magic_name__ ) assert player._is_five_high_straight() == expected assert player._card_values == 
card_values @pytest.mark.parametrize("""hand, expected""" , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> List[str]: """simple docstring""" assert PokerHand(__magic_name__ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> int: """simple docstring""" assert PokerHand(__magic_name__ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : int ) -> Optional[Any]: """simple docstring""" assert PokerHand(__magic_name__ ).compare_with(PokerHand(__magic_name__ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" assert PokerHand(__magic_name__ ).compare_with(PokerHand(__magic_name__ ) ) == expected def SCREAMING_SNAKE_CASE_ ( ) -> Any: """simple docstring""" UpperCamelCase :Union[str, Any] = [PokerHand(__magic_name__ ) for hand in SORTED_HANDS] UpperCamelCase :Dict = poker_hands.copy() shuffle(__magic_name__ ) UpperCamelCase :str = chain(sorted(__magic_name__ ) ) for index, hand in enumerate(__magic_name__ ): assert hand == poker_hands[index] def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: """simple docstring""" UpperCamelCase :Dict = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=__magic_name__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: """simple docstring""" UpperCamelCase :Any = PokerHand("""2C 4S AS 3D 5C""" ) UpperCamelCase :Dict = True UpperCamelCase :int = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert 
pokerhand._card_values == expected_card_values def SCREAMING_SNAKE_CASE_ ( ) -> str: """simple docstring""" UpperCamelCase :List[Any] = 0 UpperCamelCase :Dict = os.path.abspath(os.path.dirname(__magic_name__ ) ) UpperCamelCase :str = os.path.join(__magic_name__ , """poker_hands.txt""" ) with open(__magic_name__ ) as file_hand: for line in file_hand: UpperCamelCase :str = line[:14].strip() UpperCamelCase :Dict = line[15:].strip() UpperCamelCase , UpperCamelCase :Any = PokerHand(__magic_name__ ), PokerHand(__magic_name__ ) UpperCamelCase :Optional[Any] = player.compare_with(__magic_name__ ) if output == "Win": answer += 1 assert answer == 376
38
'''simple docstring''' from PIL import Image def __lowercase ( __lowercase , __lowercase ) -> Image: '''simple docstring''' _A = (259 * (level + 255)) / (255 * (259 - level)) def contrast(__lowercase ) -> int: return int(128 + factor * (c - 128) ) return img.point(__lowercase ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change contrast to 170 lowerCamelCase_ = change_contrast(img, 1_70) cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
79
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
39
'''simple docstring''' def __lowercase ( __lowercase ) -> int: '''simple docstring''' assert isinstance(__lowercase , __lowercase ), F'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: _A = F'''The input value of [n={number}] has to be > 0''' raise ValueError(__lowercase ) else: _A = sylvester(number - 1 ) _A = num - 1 _A = num return lower * upper + 1 if __name__ == "__main__": print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
79
0
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __lowercase = logging.getLogger(__name__) def lowercase ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = False , )-> Optional[int]: '''simple docstring''' a : Tuple = bnb_quantization_config.load_in_abit a : Any = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." 
) a : str = [] # custom device map if isinstance(A_ , A_ ) and len(device_map.keys() ) > 1: a : str = [key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: a : Tuple = get_keys_to_not_convert(A_ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A_ ) a : List[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: a : Union[str, Any] = [] a : Any = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A_ ) # compatibility with peft a : List[Any] = load_in_abit a : Tuple = load_in_abit a : Optional[int] = get_parameter_device(A_ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." 
) a : Union[str, Any] = replace_with_bnb_layers(A_ , A_ , modules_to_not_convert=A_ ) # convert param to the right dtype a : List[str] = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: a : Dict = name.replace(".weight" , "" ).replace(".bias" , "" ) a : Optional[int] = getattr(A_ , A_ , A_ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A_ ): param.to(A_ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( F'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' "We move the model to cuda." 
) return model elif weights_location is None: raise RuntimeError( F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): a : Optional[Any] = replace_with_bnb_layers( A_ , A_ , modules_to_not_convert=A_ ) a : Dict = get_quantized_model_device_map( A_ , A_ , A_ , max_memory=A_ , no_split_module_classes=A_ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): a : Dict = True a : List[Any] = any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( A_ , A_ , A_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=A_ , offload_state_dict=A_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A_ , device_map=A_ , offload_dir=A_ ) def lowercase ( A_ , A_ , A_=None , A_=None , A_=None )-> Any: '''simple docstring''' if device_map is None: if torch.cuda.is_available(): a : Dict = {"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(A_ , A_ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." 
) a : List[Any] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) a : Any = {} a : Union[str, Any] = special_dtypes a : Optional[Any] = no_split_module_classes a : List[str] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": a : List[str] = get_balanced_memory( A_ , low_zero=(device_map == "balanced_low_0") , max_memory=A_ , **A_ , ) a : List[str] = max_memory a : Dict = infer_auto_device_map(A_ , **A_ ) if isinstance(A_ , A_ ): # check if don't have any quantized module on the cpu a : Any = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules a : List[str] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def lowercase ( A_ , A_ , A_=None , A_=None )-> Any: '''simple docstring''' if modules_to_not_convert is None: a : int = [] a , a : Tuple = _replace_with_bnb_layers( A_ , A_ , A_ , A_ ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def lowercase ( A_ , A_ , A_=None , A_=None , )-> List[Any]: '''simple docstring''' a : Optional[int] = False for name, module in model.named_children(): if current_key_name is None: a : List[Any] = [] current_key_name.append(A_ ) if isinstance(A_ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` a : List[str] = ".".join(A_ ) a : Any = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: a : Optional[Any] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: a : Tuple = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A_ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: a : Union[str, Any] = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) a : Optional[Any] = module.weight.data if module.bias is not None: a : Union[str, Any] = module.bias.data bnb_module.requires_grad_(A_ ) setattr(A_ , A_ , A_ ) a : Dict = True if len(list(module.children() ) ) > 0: a , a : Optional[Any] = _replace_with_bnb_layers( A_ , A_ , A_ , A_ ) a : List[Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowercase ( A_ )-> Union[str, Any]: '''simple docstring''' with init_empty_weights(): a : Dict = deepcopy(A_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` a : Any = find_tied_parameters(A_ ) # For compatibility with Accelerate < 0.18 if isinstance(A_ , A_ ): a : List[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: a : Union[str, Any] = sum(A_ , [] ) a : List[Any] = len(A_ ) > 0 # Check if it is a base model a : Optional[int] = False if hasattr(A_ , "base_model_prefix" ): a : int = not hasattr(A_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head a : Optional[Any] = list(model.named_children() ) a : str = [list_modules[-1][0]] # add last module together with tied weights a : Tuple = set(A_ ) - set(A_ ) a : Optional[int] = list(set(A_ ) ) + list(A_ ) # remove ".weight" from the keys a : Tuple = [".weight", ".bias"] a : Optional[Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: a : str = name.replace(A_ , "" ) filtered_module_names.append(A_ ) return filtered_module_names def lowercase ( A_ )-> List[Any]: '''simple docstring''' for m in model.modules(): if isinstance(A_ , bnb.nn.Linearabit ): return True return False def lowercase ( A_ )-> Any: '''simple docstring''' return next(parameter.parameters() ).device def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ , A_ )-> str: '''simple docstring''' if fpaa_statistics is None: set_module_tensor_to_device(A_ , A_ , 0 , dtype=A_ , value=A_ ) a : Optional[int] = param_name a : Union[str, Any] = model if "." in tensor_name: a : Optional[Any] = tensor_name.split("." ) for split in splits[:-1]: a : str = getattr(A_ , A_ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) a : int = new_module a : Tuple = splits[-1] # offload weights a : List[Any] = False offload_weight(module._parameters[tensor_name] , A_ , A_ , index=A_ ) if hasattr(module._parameters[tensor_name] , "SCB" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , A_ , index=A_ , ) else: offload_weight(A_ , A_ , A_ , index=A_ ) offload_weight(A_ , param_name.replace("weight" , "SCB" ) , A_ , index=A_ ) set_module_tensor_to_device(A_ , A_ , "meta" , dtype=A_ , value=torch.empty(*param.size() ) )
40
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCamelCase_ = logging.getLogger(__name__) def __lowercase ( __lowercase , __lowercase ) -> Optional[int]: '''simple docstring''' if os.path.exists(__lowercase ): if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile( os.path.join(__lowercase , "config.json" ) ): os.remove(os.path.join(__lowercase , "config.json" ) ) if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(__lowercase , "pytorch_model.bin" ) ): os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) ) else: os.makedirs(__lowercase ) model.save_pretrained(__lowercase ) def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]: '''simple docstring''' _A = 2 if unlogit: _A = torch.pow(__lowercase , __lowercase ) _A = p * torch.log(__lowercase ) _A = 0 return -plogp.sum(dim=-1 ) def __lowercase ( __lowercase ) -> Optional[Any]: '''simple docstring''' logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) ) for row in range(len(__lowercase ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int: '''simple docstring''' _A , _A = model.config.num_hidden_layers, model.config.num_attention_heads _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) if head_mask is None: _A = torch.ones(__lowercase , __lowercase 
).to(args.device ) head_mask.requires_grad_(requires_grad=__lowercase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _A = None _A = 0.0 _A = 0.0 for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): _A = tuple(t.to(args.device ) for t in inputs ) ((_A) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _A = model(__lowercase , labels=__lowercase , head_mask=__lowercase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _A , _A , _A = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowercase ): _A = entropy(attn.detach() , __lowercase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _A = 2 _A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: _A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(__lowercase ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(__lowercase ) logger.info("Head ranked by importance scores" ) _A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _A = torch.arange( head_importance.numel() , 
device=args.device ) _A = head_ranks.view_as(__lowercase ) print_ad_tensor(__lowercase ) return attn_entropy, head_importance, total_loss def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase ) _A = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold ) _A = torch.ones_like(__lowercase ) _A = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _A = original_score while current_score >= original_score * args.masking_threshold: _A = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _A = float("Inf" ) _A = head_importance.view(-1 ).sort()[1] if len(__lowercase ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads _A = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) _A = new_head_mask.view(-1 ) _A = 0.0 _A = new_head_mask.view_as(__lowercase ) _A = new_head_mask.clone().detach() print_ad_tensor(__lowercase ) # Compute metric and head importance again _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase ) _A = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("Final head mask" ) print_ad_tensor(__lowercase ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , 
compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase ) _A = 1 / loss _A = datetime.now() - before_time _A = sum(p.numel() for p in model.parameters() ) _A = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowercase , __lowercase ): _A = [ v, ] assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowercase ) _A = sum(p.numel() for p in model.parameters() ) _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , ) _A = 1 / loss _A = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase ) logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 ) save_model(__lowercase , args.output_dir ) def __lowercase ( ) -> Union[str, Any]: '''simple docstring''' _A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." 
, ) # Other parameters parser.add_argument( "--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." ) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=__lowercase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." ) parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=128 , type=__lowercase , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." 
) , ) parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." ) parser.add_argument("--seed" , type=__lowercase , default=42 ) parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." ) _A = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) _A = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _A = torch.device("cuda" , args.local_rank ) _A = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _A = nn.parallel.DistributedDataParallel( __lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase ) elif args.n_gpu > 1: _A = nn.DataParallel(__lowercase ) # Print/save training arguments 
os.makedirs(args.output_dir , exist_ok=__lowercase ) torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , __lowercase ) # Prepare dataset _A = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _A = (torch.from_numpy(__lowercase ),) _A = TensorDataset(*__lowercase ) _A = RandomSampler(__lowercase ) _A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowercase , __lowercase , __lowercase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _A = mask_heads(__lowercase , __lowercase , __lowercase ) prune_heads(__lowercase , __lowercase , __lowercase , __lowercase ) if __name__ == "__main__": main()
79
0
'''simple docstring''' import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml _A : Optional[int] =logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[Any]: def run_func(UpperCamelCase ): @wraps(UpperCamelCase ) def run_in_eager_mode(*UpperCamelCase , **UpperCamelCase ): return func(*UpperCamelCase , **UpperCamelCase ) @wraps(UpperCamelCase ) @tf.function(experimental_compile=UpperCamelCase ) def run_in_graph_mode(*UpperCamelCase , **UpperCamelCase ): return func(*UpperCamelCase , **UpperCamelCase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. 
Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> ["tf.Tensor"]: lowerCamelCase__ : str = random.Random() lowerCamelCase__ : Any = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(UpperCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class _lowercase ( _lowercase ): a = 42 a = 42 a = "TensorFlow" @property def lowerCamelCase_ ( self: Dict ): return tf.__version__ def lowerCamelCase_ ( self: str , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: int ): # initialize GPU on separate process lowerCamelCase__ : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCamelCase__ : Tuple = self._prepare_inference_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return self._measure_speed(_inference ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: int ): lowerCamelCase__ : Union[str, Any] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCamelCase__ : Dict = self._prepare_train_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return self._measure_speed(_train ) def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase__ ) lowerCamelCase__ : int = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCamelCase__ : Tuple = self._prepare_inference_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return 
self._measure_memory(_inference ) def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCamelCase__ : List[Any] = self._prepare_train_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return self._measure_memory(_train ) def lowerCamelCase_ ( self: str , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: int ): lowerCamelCase__ : int = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowerCamelCase__ : List[str] = ( hasattr(UpperCamelCase__ , """architectures""" ) and isinstance(config.architectures , UpperCamelCase__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase__ : Optional[int] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase__ : Optional[Any] = __import__("""transformers""" , fromlist=[model_class] ) lowerCamelCase__ : List[Any] = getattr(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = model_cls(UpperCamelCase__ ) except ImportError: raise ImportError( F'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowerCamelCase__ : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](UpperCamelCase__ ) # encoder-decoder has vocab size saved differently lowerCamelCase__ : int = config.vocab_size if hasattr(UpperCamelCase__ , """vocab_size""" ) else config.encoder.vocab_size lowerCamelCase__ : int = random_input_ids(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , training=UpperCamelCase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(UpperCamelCase__ , training=UpperCamelCase__ ) lowerCamelCase__ : Any = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: int ): lowerCamelCase__ : Tuple = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. 
Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowerCamelCase__ : Any = ( hasattr(UpperCamelCase__ , """architectures""" ) and isinstance(config.architectures , UpperCamelCase__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase__ : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase__ : Tuple = __import__("""transformers""" , fromlist=[model_class] ) lowerCamelCase__ : Optional[Any] = getattr(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : str = model_cls(UpperCamelCase__ ) except ImportError: raise ImportError( F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowerCamelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase__ ) # encoder-decoder has vocab size saved differently lowerCamelCase__ : Tuple = config.vocab_size if hasattr(UpperCamelCase__ , """vocab_size""" ) else config.encoder.vocab_size lowerCamelCase__ : int = random_input_ids(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): lowerCamelCase__ : int = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )[0] lowerCamelCase__ : Any = tf.gradients(UpperCamelCase__ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): lowerCamelCase__ : Dict = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )[0] lowerCamelCase__ : Union[str, Any] = tf.gradients(UpperCamelCase__ , model.trainable_variables ) return gradients lowerCamelCase__ : List[Any] = 
encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: int ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(UpperCamelCase__ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowerCamelCase__ : Tuple = timeit.repeat( UpperCamelCase__ , repeat=self.args.repeat , number=10 , ) return min(UpperCamelCase__ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(F'''Doesn\'t fit on GPU. {e}''' ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) lowerCamelCase__ : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) lowerCamelCase__ : str = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() lowerCamelCase__ : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowerCamelCase__ : int = nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase__ ) lowerCamelCase__ : int = meminfo.used lowerCamelCase__ : int = Memory(UpperCamelCase__ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) lowerCamelCase__ : List[Any] = None else: lowerCamelCase__ : List[str] = measure_peak_memory_cpu(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = Memory(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else memory_bytes if self.args.trace_memory_line_by_line: lowerCamelCase__ : Dict = stop_memory_tracing(UpperCamelCase__ ) if memory is None: lowerCamelCase__ : Union[str, Any] = summary.total else: lowerCamelCase__ : List[str] = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
41
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = CycleDiffusionPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) 
torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A = CLIPTextModel(__UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ): '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _A = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = 
self.get_dummy_components() for name, module in components.items(): if hasattr(__UpperCAmelCase , "half" ): _A = module.half() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Any ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained( 
__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
79
0
"""Speech2Text tokenizer: JSON-vocab + SentencePiece tokenizer for facebook/s2t-* checkpoints."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# Marker SentencePiece prepends to word-initial pieces.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    """
    Speech2Text tokenizer backed by a JSON vocabulary (token -> id) and a
    SentencePiece model (string -> pieces).

    Args:
        vocab_file: Path to the JSON vocabulary file.
        spm_file: Path to the SentencePiece model file.
        bos_token / eos_token / pad_token / unk_token: Special-token strings.
        do_upper_case: Upper-case the decoded output.
        do_lower_case: Lower-case the input when tokenizing.
        tgt_lang: Initial target language (used when ``lang_codes`` is set).
        lang_codes: Key into ``LANGUAGES`` selecting the supported language set.
        sp_model_kwargs: Extra kwargs forwarded to ``SentencePieceProcessor``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    # Language-code token ids prepended to every encoded sequence.
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language-code id of ``tgt_lang``."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        # Unknown tokens map to the unk id.
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join pieces back into a string, decoding runs of ordinary pieces with SentencePiece."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """``[lang_code] tokens </s>`` (pairs concatenated, kept only for API consistency)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Mask with 1 for special tokens (prefix language code + eos) and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        # SentencePieceProcessor is not picklable; drop it and rebuild in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the JSON vocab and SentencePiece model into ``save_directory``."""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            # No model file on disk: serialize the in-memory model instead.
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from ``path``.

    Fixes the original NameError: the processor was bound to one name while
    ``Load``/``return`` referenced an undefined ``spm``.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    """Read a JSON file."""
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    """Write ``data`` as pretty-printed JSON."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)


# Backward-compat aliases for the previously garbled public names.
__UpperCAmelCase = Speech2TextTokenizer
SCREAMING_SNAKE_CASE__ = save_json
42
"""Lazy import structure for the Longformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> the public names it defines. Consumed by _LazyModule
# so heavy backends (torch / TF) are only imported on first attribute access.
# Fixes the original module, where each optional-backend list rebound the same
# variable (clobbering this dict) and `_import_structure` was never defined.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports (guarded by the same
    # availability checks as the lazy structure above).
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with the lazy proxy (the original bound the proxy to
    # a throwaway variable, so lazy loading never took effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
0
"""Convert a fairseq UniSpeech checkpoint into the Hugging Face Transformers format."""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq state-dict key fragment -> Transformers module path ("*" is replaced
# by the encoder-layer index when loading).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model (no "unispeech." prefix).
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Copy `value` into the HF sub-module addressed by the dotted `key` path.

    `weight_type` selects which tensor attribute to fill ("weight", "weight_g",
    "weight_v", "bias" or None for the module itself).
    """
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Walk the fairseq state dict and load every matching tensor into `hf_model`."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # the token before the matched key is the layer index
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor, or record it as unused."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq UniSpeech checkpoint into the Transformers design.

    For fine-tuned checkpoints, also builds and saves the CTC processor
    (tokenizer + feature extractor) from the fairseq dictionary.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
43
"""A `DownloadManager` substitute that serves local "dummy data" for dataset tests.

NOTE(review): identifier names were reconstructed from the upstream `datasets`
implementation; the original block had every method bound to the same mangled
name, so only the last definition survived.
"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    # name of the file inside the dummy-data zip archive
    dummy_file_name = "dummy_data"
    # directory containing the dataset scripts in the repository
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        """Path to the (downloaded and extracted) dummy-data directory."""
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch (locally or from GitHub) and extract the dummy-data archive."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map real download URLs to paths inside the dummy-data archive."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # dummy data is already extracted
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative posix path, open binary file) pairs for the archive at `path`."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield every regular file under `paths`, skipping hidden/dunder entries."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
79
0
"""Tests for the XGLM tokenizers (slow SentencePiece and fast versions).

NOTE(review): test-method names were reconstructed from the upstream
Transformers test suite; the original block named every method `__A`, so
only the last one survived class creation.
"""
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Round-trip a single token through `_convert_token_to_id` / `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # characters outside the fixture vocabulary come back as "<unk>"
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5],
                [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5],
                [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5],
            ],
            # all-ones masks matching the three sequence lengths above
            "attention_mask": [[1] * 98, [1] * 35, [1] * 14],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
44
'''simple docstring''' def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Union[str, Any]: '''simple docstring''' if isinstance(__lowercase , __lowercase ) and isinstance(__lowercase , __lowercase ): _A = len(set_a.intersection(__lowercase ) ) if alternative_union: _A = len(__lowercase ) + len(__lowercase ) else: _A = len(set_a.union(__lowercase ) ) return intersection / union if isinstance(__lowercase , (list, tuple) ) and isinstance(__lowercase , (list, tuple) ): _A = [element for element in set_a if element in set_b] if alternative_union: _A = len(__lowercase ) + len(__lowercase ) return len(__lowercase ) / union else: _A = set_a + [element for element in set_b if element not in set_a] return len(__lowercase ) / len(__lowercase ) return len(__lowercase ) / len(__lowercase ) return None if __name__ == "__main__": lowerCamelCase_ = {'''a''', '''b''', '''c''', '''d''', '''e'''} lowerCamelCase_ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''} print(jaccard_similarity(set_a, set_b))
79
0
"""simple docstring""" def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def lowercase ( ) -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
45
"""Tests for accelerate's `KwargsHandler` dataclasses (grad-scaler / DDP kwargs)."""
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    # a/b/c defaults are used to verify `to_kwargs` only reports changed fields
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Custom handler values must reach the underlying torch GradScaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this file under torchrun; the __main__ block below does the checks.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
79
0
"""simple docstring""" def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) lowerCAmelCase = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b" lowerCAmelCase = str(bin(SCREAMING_SNAKE_CASE ) )[2:] lowerCAmelCase = max(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) return "0b" + "".join( str(int("""1""" in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE ) , b_binary.zfill(SCREAMING_SNAKE_CASE ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
46
'''simple docstring''' def __lowercase ( __lowercase = 100 ) -> int: '''simple docstring''' _A = n * (n + 1) * (2 * n + 1) / 6 _A = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
79
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase : List[str] = logging.get_logger(__name__) lowerCamelCase : List[Any] = { "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json", } class A__ ( A__ , A__ ): A__ = 'convnextv2' def __init__( self : Tuple , _a : Optional[int]=3 , _a : Any=4 , _a : int=4 , _a : Union[str, Any]=None , _a : List[str]=None , _a : Optional[Any]="gelu" , _a : Any=0.02 , _a : Any=1e-12 , _a : Tuple=0.0 , _a : int=224 , _a : Any=None , _a : Optional[int]=None , **_a : List[str] , ) -> Optional[Any]: '''simple docstring''' super().__init__(**_a ) _SCREAMING_SNAKE_CASE =num_channels _SCREAMING_SNAKE_CASE =patch_size _SCREAMING_SNAKE_CASE =num_stages _SCREAMING_SNAKE_CASE =[96, 192, 384, 768] if hidden_sizes is None else hidden_sizes _SCREAMING_SNAKE_CASE =[3, 3, 9, 3] if depths is None else depths _SCREAMING_SNAKE_CASE =hidden_act _SCREAMING_SNAKE_CASE =initializer_range _SCREAMING_SNAKE_CASE =layer_norm_eps _SCREAMING_SNAKE_CASE =drop_path_rate _SCREAMING_SNAKE_CASE =image_size _SCREAMING_SNAKE_CASE =['stem'] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =get_aligned_output_features_output_indices( out_features=_a , out_indices=_a , stage_names=self.stage_names )
47
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. 
If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. 
_A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in 
original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() 
trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
79
0
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } SCREAMING_SNAKE_CASE__ : Optional[int] = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: for attribute in 
key.split("." ): lowerCamelCase : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if weight_type is not None: lowerCamelCase : List[Any] = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).shape else: lowerCamelCase : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase : str = value elif weight_type == "weight_g": lowerCamelCase : Optional[int] = value elif weight_type == "weight_v": lowerCamelCase : Dict = value elif weight_type == "bias": lowerCamelCase : Optional[int] = value else: lowerCamelCase : Tuple = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : Optional[Any] = [] lowerCamelCase : Tuple = fairseq_model.state_dict() lowerCamelCase : int = hf_model.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase : str = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,hf_model.config.feat_extract_norm == "group" ,) lowerCamelCase : Dict = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase : List[str] = True if "*" in mapped_key: lowerCamelCase : int = name.split(_SCREAMING_SNAKE_CASE )[0].split("." 
)[-2] lowerCamelCase : Tuple = mapped_key.replace("*" ,_SCREAMING_SNAKE_CASE ) if "weight_g" in name: lowerCamelCase : List[Any] = "weight_g" elif "weight_v" in name: lowerCamelCase : Tuple = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: lowerCamelCase : List[str] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase : Tuple = "weight" else: lowerCamelCase : Dict = None set_recursively(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f'''Unused weights: {unused_weights}''' ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: lowerCamelCase : List[Any] = full_name.split("conv_layers." )[-1] lowerCamelCase : Union[str, Any] = name.split("." ) lowerCamelCase : Optional[Any] = int(items[0] ) lowerCamelCase : Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCamelCase : Tuple = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCamelCase : Optional[Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( 
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) lowerCamelCase : Optional[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCamelCase : List[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> Tuple: # load the pre-trained checkpoints lowerCamelCase : Optional[int] = torch.load(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = WavLMConfigOrig(checkpoint["cfg"] ) lowerCamelCase : Any = WavLMOrig(_SCREAMING_SNAKE_CASE ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: lowerCamelCase : Any = WavLMConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase : Dict = WavLMConfig() lowerCamelCase : Optional[int] = WavLMModel(_SCREAMING_SNAKE_CASE ) recursively_load_weights(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) hf_wavlm.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, 
args.config_path)
48
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class _UpperCAmelCase(PretrainedConfig):
    """Configuration for a CANINE (character-level) encoder model.

    Fixes the obfuscated original, whose base class ``snake_case_`` was
    undefined and whose constructor assigned every argument to a
    throwaway local instead of ``self``. Base restored from the import at
    the top of this module.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        # CANINE uses Unicode private-use codepoints as BOS/EOS markers.
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
79
0
def is_palindrome(num: int) -> bool:
    """Return True if *num* reads the same forwards and backwards."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """Return *num* plus its decimal reversal (e.g. 47 -> 47 + 74 = 121)."""
    return num + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    """Project Euler 55: count Lychrel candidates below *limit*.

    A number is assumed Lychrel if 50 reverse-and-add iterations never
    produce a palindrome. Note the palindrome test runs AFTER the first
    addition, so palindromic starting values are not excluded (matching
    the problem statement, e.g. 4994 is a candidate).

    Fixes the obfuscated original, in which all three functions shared the
    name ``__snake_case`` and every internal call hit a NameError.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # Loop exhausted without reaching a palindrome -> Lychrel candidate.
            lychrel_nums.append(num)
    return len(lychrel_nums)


# Backward-compatible alias for the original (obfuscated) name.
__snake_case = solution


if __name__ == "__main__":
    print(f"{solution() = }")
49
'''simple docstring''' class _UpperCAmelCase : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : list[int] ): '''simple docstring''' _A = len(__UpperCAmelCase ) _A = [0] * len_array if len_array > 0: _A = array[0] for i in range(1 , __UpperCAmelCase ): _A = self.prefix_sum[i - 1] + array[i] def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ): '''simple docstring''' _A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCAmelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
79
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowerCAmelCase ( unittest.TestCase ): def A_ ( self : Tuple ) -> str: lowerCamelCase__ : int = tempfile.mkdtemp() lowerCamelCase__ : Tuple = BlipImageProcessor() lowerCamelCase__ : str = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) lowerCamelCase__ : List[Any] = BlipaProcessor(UpperCAmelCase , UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def A_ ( self : List[str] , **UpperCAmelCase : str ) -> str: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer def A_ ( self : Optional[int] , **UpperCAmelCase : List[str] ) -> Tuple: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def A_ ( self : Optional[Any] ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def A_ ( self : Tuple ) -> Any: lowerCamelCase__ : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase__ : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self : Dict ) -> List[str]: lowerCamelCase__ : Optional[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowerCamelCase__ : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) lowerCamelCase__ : str = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase , padding_value=1.0 ) 
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def A_ ( self : Tuple ) -> str: lowerCamelCase__ : Dict = self.get_image_processor() lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Any = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs() lowerCamelCase__ : List[Any] = image_processor(UpperCAmelCase , return_tensors='np' ) lowerCamelCase__ : List[Any] = processor(images=UpperCAmelCase , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self : int ) -> Any: lowerCamelCase__ : Optional[Any] = self.get_image_processor() lowerCamelCase__ : List[str] = self.get_tokenizer() lowerCamelCase__ : int = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = 'lower newer' lowerCamelCase__ : Optional[int] = processor(text=UpperCAmelCase ) lowerCamelCase__ : Tuple = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self : Tuple ) -> List[Any]: lowerCamelCase__ : Any = self.get_image_processor() lowerCamelCase__ : Dict = self.get_tokenizer() lowerCamelCase__ : Optional[Any] = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = 'lower newer' lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs() lowerCamelCase__ : Optional[Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 
'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def A_ ( self : Dict ) -> Union[str, Any]: lowerCamelCase__ : Optional[int] = self.get_image_processor() lowerCamelCase__ : Optional[int] = self.get_tokenizer() lowerCamelCase__ : str = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowerCamelCase__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ : str = processor.batch_decode(UpperCAmelCase ) lowerCamelCase__ : str = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def A_ ( self : Tuple ) -> str: lowerCamelCase__ : Optional[int] = self.get_image_processor() lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : List[str] = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowerCamelCase__ : Any = 'lower newer' lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs() lowerCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
50
'''simple docstring''' from typing import List import numpy as np def __lowercase ( __lowercase ) -> int: '''simple docstring''' _A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) _A = max(lists_lengths.values() , default=0 ) return max(1 , __lowercase ) def __lowercase ( __lowercase , __lowercase ) -> List[range]: '''simple docstring''' _A = [] for group_idx in range(__lowercase ): _A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _A = range(__lowercase , start + num_shards_to_add ) shards_indices_per_group.append(__lowercase ) return shards_indices_per_group def __lowercase ( __lowercase , __lowercase ) -> List[dict]: '''simple docstring''' _A = _number_of_shards_in_gen_kwargs(__lowercase ) if num_shards == 1: return [dict(__lowercase )] else: _A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__lowercase , __lowercase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__lowercase ) ) ] def __lowercase ( __lowercase ) -> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __lowercase ) else 
gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __lowercase ( __lowercase , __lowercase ) -> dict: '''simple docstring''' _A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )} _A = {} for size in list_sizes: _A = list(range(__lowercase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _A = dict(__lowercase ) for key, value in shuffled_kwargs.items(): if isinstance(__lowercase , __lowercase ): _A = [value[i] for i in indices_per_size[len(__lowercase )]] return shuffled_kwargs
79
0
"""Decorator that flags a function as experimental by emitting a warning on each call."""
import warnings
from functools import wraps
from typing import Callable


def A(__A: Callable) -> Callable:
    """Wrap `__A` so each call emits a UserWarning marking it experimental.

    Defects fixed: the inner wrapper declared `*__A, **__A` (duplicate parameter
    name, a SyntaxError), the body referenced an undefined name `fn`, and a call
    argument was being passed as `warnings.warn`'s `category` (which must be a
    Warning subclass).

    Args:
        __A: the callable to wrap.

    Returns:
        A wrapper with the same signature and metadata (via functools.wraps).
    """

    @wraps(__A)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{__A.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return __A(*args, **kwargs)

    return _inner_fn
51
'''Lazy-import scaffolding for the Jukebox model family (config, tokenizer, modeling).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> list of public names it exports; consumed by _LazyModule below.
lowerCamelCase_ = {
    '''configuration_jukebox''': [
        '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''JukeboxConfig''',
        '''JukeboxPriorConfig''',
        '''JukeboxVQVAEConfig''',
    ],
    '''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}

try:
    # Modeling code is only importable when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this REBINDS the import-structure dict above to a plain list,
    # and `_import_structure` referenced at the bottom is never defined here.
    # Looks like identifier mangling of the usual
    # `_import_structure["modeling_jukebox"] = [...]` pattern — confirm against
    # the canonical transformers module layout before relying on this.
    lowerCamelCase_ = [
        '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''JukeboxModel''',
        '''JukeboxPreTrainedModel''',
        '''JukeboxVQVAE''',
        '''JukeboxPrior''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
0
"""Project Euler maximum path sum (problems 18/67) over a triangle read from disk."""
import os


def A_() -> int:
    """Return the maximum top-to-bottom path sum through ``triangle.txt``.

    Reads the whitespace-separated triangle of integers stored next to this
    script, then folds the best reachable sum downward row by row.

    Defects fixed: every argument had been mangled to the undefined name
    ``_lowerCAmelCase`` (NameError at runtime; the path base is ``__file__``),
    and the return annotation said ``List[str]`` for an ``int`` result.

    Returns:
        The largest total obtainable moving only to adjacent numbers below.
    """
    script_directory = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_directory, "triangle.txt")
    with open(triangle_path) as f:
        triangle = [[int(token) for token in line.split()] for line in f if line.strip()]
    return _max_path_sum(triangle)


def _max_path_sum(triangle):
    """In-place top-down DP: add each cell's best parent, then take the best last row."""
    for i in range(1, len(triangle)):
        for j in range(len(triangle[i])):
            # Parent directly above (absent past the previous row's right edge).
            above = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
            # Parent above-left (absent at the left edge).
            above_left = triangle[i - 1][j - 1] if j > 0 else 0
            triangle[i][j] += max(above, above_left)
    return max(triangle[-1])


if __name__ == "__main__":
    print(A_())
52
'''VQ-Diffusion text-to-image pipeline (diffusers-style).

NOTE(review): identifier mangling has destroyed this chunk's data flow — every
local is assigned to ``_A``, every parameter is ``__UpperCAmelCase``, both
classes are named ``_UpperCAmelCase`` (the second shadows the first), and the
bases ``snake_case_`` are undefined. Method bodies still read the ORIGINAL
names (``learnable``, ``prompt_embeds``, ``batch_size`` …) whose defining
assignments were renamed, so nothing here runs as-is. Code is left byte-identical;
restore against the canonical diffusers VQDiffusion pipeline.
'''
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


lowerCamelCase_ = logging.get_logger(__name__)  # pylint: disable=invalid-name


class _UpperCAmelCase ( snake_case_ , snake_case_ ):
    """Learned (or null) embeddings used for classifier-free guidance sampling."""

    @register_to_config
    def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ):
        '''When learnable, holds a (length, hidden_size) parameter of zeros; otherwise None.'''
        super().__init__()
        # NOTE(review): `learnable`/`hidden_size`/`length` below were the original
        # parameter names; the parameters are now all `__UpperCAmelCase`.
        _A = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase )
        else:
            _A = None
        _A = torch.nn.Parameter(__UpperCAmelCase )


class _UpperCAmelCase ( snake_case_ ):
    """Text-to-image pipeline built on a VQ-VAE, a CLIP text encoder and a discrete diffusion transformer."""

    # Module slots registered on the pipeline (mangled: all share the name `snake_case`).
    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42

    def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ):
        '''Register all sub-modules with the DiffusionPipeline machinery.'''
        super().__init__()
        self.register_modules(
            vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , )

    def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ):
        '''Encode the prompt with CLIP; also build unconditional embeddings for classifier-free guidance.'''
        _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1

        # get prompt text embeddings
        _A = self.tokenizer(
            __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        _A = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # Warn about and drop tokens past CLIP's maximum sequence length.
            _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            _A = text_input_ids[:, : self.tokenizer.model_max_length]
        _A = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )

        # duplicate text embeddings for each generation per prompt
        _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                _A = self.learned_classifier_free_sampling_embeddings.embeddings
                _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 )
            else:
                # Encode empty strings as the unconditional branch.
                _A = [""] * batch_size
                _A = text_input_ids.shape[-1]
                _A = self.tokenizer(
                    __UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
                _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _A = negative_prompt_embeds.shape[1]
            _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 )
            _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _A = torch.cat([negative_prompt_embeds, prompt_embeds] )

        return prompt_embeds

    @torch.no_grad()
    def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ):
        '''Run the full sampling loop: masked latents -> denoised codebook indices -> decoded image.'''
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            _A = 1
        elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            _A = len(__UpperCAmelCase )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' )

        _A = batch_size * num_images_per_prompt

        _A = guidance_scale > 1.0

        _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(__UpperCAmelCase )}.''' )

        # get the initial completely masked latents unless the user supplied it
        _A = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            _A = self.transformer.num_vector_embeds - 1
            _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            _A = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device )

        _A = self.scheduler.timesteps.to(self.device )

        _A = latents

        for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
            # expand the sample if we are doing classifier free guidance
            _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample

            if do_classifier_free_guidance:
                _A , _A = model_output.chunk(2 )
                _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # Re-normalize the guided log-probabilities.
                model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase )

            _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase )

            # remove `log(0)`'s (`-inf`s)
            _A = model_output.clamp(-70 )

            # compute the previous noisy sample x_t -> x_t-1
            _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

        # Decode the final codebook indices back to an image via the VQ-VAE.
        _A = self.vqvae.config.vq_embed_dim
        _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase )
        _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample

        _A = (image / 2 + 0.5).clamp(0 , 1 )
        _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            _A = self.numpy_to_pil(__UpperCAmelCase )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__UpperCAmelCase )

    def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ):
        '''Zero out (set to log(0)) all but the top probability mass up to `truncation_rate`.'''
        _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase )
        _A = torch.exp(__UpperCAmelCase )
        _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase )
        _A = torch.cat((all_true, keep_mask) , dim=1 )
        _A = keep_mask[:, :-1, :]

        # Undo the sort so the mask aligns with the original class ordering.
        _A = keep_mask.gather(1 , indices.argsort(1 ) )

        _A = log_p_x_0.clone()

        _A = -torch.inf  # -inf = log(0)

        return rv
79
0
'''XGLM tokenizer (SentencePiece-based) with fairseq-compatible id alignment.

NOTE(review): identifier mangling has left this chunk inconsistent — the
module-level constants are all bound to the same name ``a__`` (so the
``VOCAB_FILES_NAMES`` etc. read below are undefined here), all class attributes
share the name ``SCREAMING_SNAKE_CASE_``, all locals are ``__UpperCamelCase``,
and method bodies read names (``sp_model_kwargs``, ``kwargs``, ``madeup_words``,
``vocab_file`` …) whose defining assignments were renamed. Code is left
byte-identical; restore against the canonical transformers XGLM tokenizer.
'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


a__ : List[str] = logging.get_logger(__name__)

a__ : Any = '''▁'''  # SentencePiece word-boundary marker

a__ : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''}

a__ : List[Any] = {
    '''vocab_file''': {
        '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
    }
}

a__ : List[Any] = {
    '''facebook/xglm-564M''': 2_048,
}


class snake_case ( __lowerCamelCase ):
    """SentencePiece tokenizer for XGLM with fairseq-style special-token offsets."""

    SCREAMING_SNAKE_CASE_ : List[Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE_ : Optional[Any] = ["input_ids", "attention_mask"]

    def __init__( self : Dict , __A : List[Any] , __A : Dict="<s>" , __A : Any="</s>" , __A : Union[str, Any]="</s>" , __A : Optional[int]="<s>" , __A : Dict="<unk>" , __A : Optional[Any]="<pad>" , __A : Optional[Dict[str, Any]] = None , **__A : Union[str, Any] , ):
        '''Load the SentencePiece model and set up fairseq-aligned vocab bookkeeping.'''
        __UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        __UpperCamelCase = 7
        __UpperCamelCase = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]

        __UpperCamelCase = kwargs.get('additional_special_tokens' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )

        __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__A ) )
        __UpperCamelCase = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        __UpperCamelCase = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        __UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        __UpperCamelCase = len(self.sp_model )
        __UpperCamelCase = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(__A )
        __UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self : Tuple ):
        # Drop the unpicklable SentencePiece processor; keep its serialized proto.
        __UpperCamelCase = self.__dict__.copy()
        __UpperCamelCase = None
        __UpperCamelCase = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : Any , __A : Optional[int] ):
        __UpperCamelCase = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            __UpperCamelCase = {}

        __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def _lowerCamelCase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None ):
        # Build model inputs: </s> prepended; pairs joined with doubled separators (fairseq style).
        if token_ids_a is None:
            return [self.sep_token_id] + token_ids_a
        __UpperCamelCase = [self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_a

    def _lowerCamelCase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
        # NOTE(review): the super() call below passes the keyword `token_ids_a` twice —
        # a SyntaxError introduced by the mangling (originally token_ids_0/token_ids_1).
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )

        if token_ids_a is None:
            return [1] + ([0] * len(__A ))
        return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A ))

    def _lowerCamelCase ( self : Dict , __A : List[int] , __A : Optional[List[int]] = None ):
        # Token-type ids are all zeros for XGLM (no segment embeddings).
        __UpperCamelCase = [self.sep_token_id]

        if token_ids_a is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]

    @property
    def _lowerCamelCase ( self : Optional[int] ):
        # Total vocab = SentencePiece pieces + fairseq offset + madeup filler words.
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words

    def _lowerCamelCase ( self : Optional[int] ):
        __UpperCamelCase = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _lowerCamelCase ( self : List[Any] , __A : str ):
        return self.sp_model.encode(__A , out_type=__A )

    def _lowerCamelCase ( self : int , __A : Any ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __UpperCamelCase = self.sp_model.PieceToId(__A )

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _lowerCamelCase ( self : List[str] , __A : List[str] ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def _lowerCamelCase ( self : Dict , __A : int ):
        # Join pieces and replace the SentencePiece underline with spaces.
        __UpperCamelCase = ''.join(__A ).replace(__A , ' ' ).strip()
        return out_string

    def _lowerCamelCase ( self : int , __A : str , __A : Optional[str] = None ):
        # Persist the SentencePiece model file into `save_directory`.
        if not os.path.isdir(__A ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __UpperCamelCase = os.path.join(
            __A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __A )
        elif not os.path.isfile(self.vocab_file ):
            with open(__A , 'wb' ) as fi:
                __UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(__A )

        return (out_vocab_file,)
53
'''Conversion script: timm ViT-hybrid checkpoint -> HuggingFace ViTHybrid.

NOTE(review): identifier mangling has broken this chunk — all six helper
functions share the name ``__lowercase`` (each def shadows the previous), while
call sites still reference the canonical names (``create_rename_keys``,
``rename_key``, ``read_in_q_k_v``, ``remove_classification_head_``,
``prepare_img``, ``convert_vit_checkpoint``), and locals assigned to ``_A`` are
read back under their original names (``rename_keys``, ``state_dict`` …).
Code is left byte-identical; restore against the canonical transformers script.
'''
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)


def __lowercase ( __lowercase , __lowercase=False ) -> int:
    '''Build the (timm key, HF key) rename table for the whole model.'''
    _A = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )

    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )

    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )

        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )

    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    # fmt: on

    return rename_keys


def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple:
    '''Split each timm fused qkv matrix/bias into separate HF query/key/value entries.'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            _A = ""
        else:
            _A = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        _A = in_proj_weight[
            : config.hidden_size, :
        ]
        _A = in_proj_bias[: config.hidden_size]
        _A = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        _A = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        _A = in_proj_weight[
            -config.hidden_size :, :
        ]
        _A = in_proj_bias[-config.hidden_size :]


def __lowercase ( __lowercase ) -> List[str]:
    '''Drop the classification head weights from a state dict (base-model export).'''
    _A = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(__lowercase , __lowercase )


def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple:
    '''Move `dct[old]` to `dct[new]` in place.'''
    _A = dct.pop(__lowercase )
    _A = val


def __lowercase ( ) -> List[str]:
    '''Download the standard COCO cats test image used to verify the conversion.'''
    _A = "http://images.cocodataset.org/val2017/000000039769.jpg"
    _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
    return im


@torch.no_grad()
def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple:
    '''Copy a timm ViT-hybrid checkpoint into the HF format, verify outputs, optionally push.'''
    _A = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , )
    _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 )
    _A = False

    # load original model from timm
    _A = timm.create_model(__lowercase , pretrained=__lowercase )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    _A = timm_model.state_dict()
    if base_model:
        remove_classification_head_(__lowercase )
    _A = create_rename_keys(__lowercase , __lowercase )
    for src, dest in rename_keys:
        rename_key(__lowercase , __lowercase , __lowercase )
    read_in_q_k_v(__lowercase , __lowercase , __lowercase )

    _A = "huggingface/label-files"
    _A = "imagenet-1k-id2label.json"
    _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
    _A = {int(__lowercase ): v for k, v in idalabel.items()}
    _A = idalabel
    _A = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        _A = ViTHybridModel(__lowercase ).eval()
    else:
        _A = ViTHybridForImageClassification(__lowercase ).eval()
    model.load_state_dict(__lowercase )

    # create image processor
    _A = create_transform(**resolve_data_config({} , model=__lowercase ) )
    _A = transform.transforms

    _A = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    _A = ViTHybridImageProcessor(
        do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )

    _A = prepare_img()
    _A = transform(__lowercase ).unsqueeze(0 )
    _A = processor(__lowercase , return_tensors="pt" ).pixel_values

    # verify pixel values
    assert torch.allclose(__lowercase , __lowercase )

    # verify logits
    with torch.no_grad():
        _A = model(__lowercase )
    _A = outputs.logits

    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        _A = timm_model.forward_features(__lowercase )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(__lowercase , outputs.pooler_output , atol=1e-3 )
    else:
        _A = timm_model(__lowercase )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        Path(__lowercase ).mkdir(exist_ok=__lowercase )
        print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowercase )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(__lowercase )

    if push_to_hub:
        print(F'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(F'''ybelkada/{vit_name}''' )
        processor.push_to_hub(F'''ybelkada/{vit_name}''' )


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--vit_name''',
        default='''vit_base_r50_s16_384''',
        type=str,
        help='''Name of the hybrid ViT timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
    )

    lowerCamelCase_ = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
79
0
"""simple docstring""" from collections import namedtuple a__ : Tuple = namedtuple('''from_to''', '''from_ to''') a__ : str = { '''cubicmeter''': from_to(1, 1), '''litre''': from_to(0.0_01, 1_0_0_0), '''kilolitre''': from_to(1, 1), '''gallon''': from_to(0.0_04_54, 2_64.1_72), '''cubicyard''': from_to(0.7_64_55, 1.3_07_95), '''cubicfoot''': from_to(0.0_28, 35.31_47), '''cup''': from_to(0.0_00_23_65_88, 42_26.75), } def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if from_type not in METRIC_CONVERSION: raise ValueError( f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n""" + ", ".join(lowerCAmelCase_ ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n""" + ", ".join(lowerCAmelCase_ ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
54
'''Lazy-import scaffolding for the TimeSeriesTransformer model.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> list of public names it exports; consumed by _LazyModule.
lowerCamelCase_ = {
    '''configuration_time_series_transformer''': [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TimeSeriesTransformerConfig''',
    ],
}

try:
    # Modeling code is only importable when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds the import-structure dict to a plain list, and
    # `_import_structure` used at the bottom is never defined in this chunk —
    # mangling of `_import_structure["modeling_time_series_transformer"] = [...]`.
    # Confirm against the canonical transformers module layout.
    lowerCamelCase_ = [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimeSeriesTransformerForPrediction''',
        '''TimeSeriesTransformerModel''',
        '''TimeSeriesTransformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
0
'''MaskFormer model configuration.

NOTE(review): identifier mangling has broken this chunk — all four class
attributes share the name ``_lowerCamelCase`` (only the last binding survives),
every local is ``lowerCamelCase_``, parameters are all ``UpperCamelCase``, and
the base class ``lowercase`` is undefined. Bodies still read the ORIGINAL names
(``backbone_config``, ``decoder_config``, ``self.backbones_supported`` …).
Code is left byte-identical; restore against the canonical transformers config.
'''
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


a_ : Union[str, Any] = {
    """facebook/maskformer-swin-base-ade""": (
        """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

a_ : Tuple = logging.get_logger(__name__)


class snake_case ( lowercase ):
    """Configuration for MaskFormer: a backbone config, a DETR-style decoder config, and loss weights."""

    _lowerCamelCase = "maskformer"
    _lowerCamelCase = {"hidden_size": "mask_feature_size"}
    _lowerCamelCase = ["resnet", "swin"]
    _lowerCamelCase = ["detr"]

    def __init__( self , UpperCamelCase = 256 , UpperCamelCase = 256 , UpperCamelCase = 0.1 , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 0.02 , UpperCamelCase = 1.0 , UpperCamelCase = 1.0 , UpperCamelCase = 1.0 , UpperCamelCase = 20.0 , UpperCamelCase = None , **UpperCamelCase , ):
        """Validate/normalize the backbone and decoder configs, then store all hyper-parameters."""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            lowerCamelCase_ = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )

        if isinstance(UpperCamelCase , UpperCamelCase ):
            # A plain dict was passed: resolve its model_type to a config class.
            lowerCamelCase_ = backbone_config.pop("model_type" )
            lowerCamelCase_ = CONFIG_MAPPING[backbone_model_type]
            lowerCamelCase_ = config_class.from_dict(UpperCamelCase )

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
                f'''Supported model types: {",".join(self.backbones_supported )}''' )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            lowerCamelCase_ = DetrConfig()
        else:
            # verify that the decoder is supported
            lowerCamelCase_ = (
                decoder_config.pop("model_type" ) if isinstance(UpperCamelCase , UpperCamelCase ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'''Transformer Decoder {decoder_type} not supported, please use one of'''
                    f''' {",".join(self.decoders_supported )}''' )
            if isinstance(UpperCamelCase , UpperCamelCase ):
                lowerCamelCase_ = CONFIG_MAPPING[decoder_type]
                lowerCamelCase_ = config_class.from_dict(UpperCamelCase )

        lowerCamelCase_ = backbone_config
        lowerCamelCase_ = decoder_config
        # main feature dimension for the model
        lowerCamelCase_ = fpn_feature_size
        lowerCamelCase_ = mask_feature_size
        # initializer
        lowerCamelCase_ = init_std
        lowerCamelCase_ = init_xavier_std
        # Hungarian matcher && loss
        lowerCamelCase_ = cross_entropy_weight
        lowerCamelCase_ = dice_weight
        lowerCamelCase_ = mask_weight
        lowerCamelCase_ = use_auxiliary_loss
        lowerCamelCase_ = no_object_weight
        lowerCamelCase_ = output_auxiliary_logits
        lowerCamelCase_ = self.decoder_config.encoder_attention_heads
        lowerCamelCase_ = self.decoder_config.num_hidden_layers
        super().__init__(**UpperCamelCase )

    @classmethod
    def snake_case ( cls , UpperCamelCase , UpperCamelCase , **UpperCamelCase ):
        """Alternate constructor: build directly from backbone + decoder config objects."""
        return cls(
            backbone_config=UpperCamelCase , decoder_config=UpperCamelCase , **UpperCamelCase , )

    def snake_case ( self ):
        """Serialize this config to a plain dict, expanding the nested configs."""
        lowerCamelCase_ = copy.deepcopy(self.__dict__ )
        lowerCamelCase_ = self.backbone_config.to_dict()
        lowerCamelCase_ = self.decoder_config.to_dict()
        lowerCamelCase_ = self.__class__.model_type
        return output
55
"""`datasets` metric wrapper around Unbabel's COMET MT-evaluation models.

Fixes vs. the previous revision (identifier mangling had broken the file):
- `_CITATION`, `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `logger` were all
  assigned to the single name `lowerCamelCase_`, each clobbering the previous,
  while the class decorator references the original names (NameError);
- `_compute` declared several parameters with the same mangled name
  (duplicate argument names are a SyntaxError);
- all three methods shared one name, but `datasets.Metric` dispatches on
  `_info` / `_download_and_prepare` / `_compute`;
- the loaded model was bound to a local instead of `self.scorer`, which
  `_compute` reads.
"""
import comet  # From: unbabel-comet
import torch

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
  author    = {Rei, Ricardo  and  Stewart, Craig  and  Farinha, Ana C  and  Lavie, Alon},
  title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation},
  month     = {November},
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
  title     = "{COMET}: A Neural Framework for {MT} Evaluation",
  author    = "Rei, Ricardo  and  Stewart, Craig  and  Farinha, Ana C  and  Lavie, Alon",
  booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
  month     = nov,
  year      = "2020",
  address   = "Online",
  publisher = "Association for Computational Linguistics",
  url       = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
  pages     = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine
Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the
WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCAmelCase(datasets.Metric):
    """COMET metric: scores (source, hypothesis, reference) triples with a neural model."""

    def _info(self):
        """Declare the metric's schema and reference material for `datasets`."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Download and load the COMET checkpoint selected by `config_name`."""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score each (src, mt, ref) triple; returns per-sample scores and their mean."""
        if gpus is None:
            # Use one GPU when available, otherwise run on CPU.
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        # Transpose the column dict into a list of per-sample row dicts.
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        # NOTE(review): assumes comet's predict() returns (seg_scores, sys_score)
        # in this order — confirm against the pinned unbabel-comet version.
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
79
0
"""Backward-compatibility re-exports for the legacy `transformers.data` utilities.

Fix vs. the previous revision: `SquadVaProcessor` was listed twice in the
`.processors` import (the second occurrence redundantly re-bound the same
name); the duplicate is removed.
"""
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeqaSeq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    # NOTE(review): name looks mangled — upstream exposes SquadV1Processor and
    # SquadV2Processor as two distinct classes; confirm against .processors.
    SquadVaProcessor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
56
"""In-place slowsort (a deliberately inefficient reference sorting algorithm).

Fix vs. the previous revision: the function was declared as
`def __lowercase(__lowercase, __lowercase=None, __lowercase=None)` — duplicate
parameter names are a SyntaxError in Python — and its own body recursed via
the undefined name `slowsort`. The intended signature is restored.
"""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place, ascending.

    Args:
        sequence: mutable sequence to sort in place.
        start: first index of the range (defaults to 0).
        end: last index of the range, inclusive (defaults to len(sequence) - 1).

    Returns:
        None — the input is mutated.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        # Zero- or one-element range: already sorted.
        return
    mid = (start + end) // 2
    # Recursively place the maxima of both halves at `mid` and `end` ...
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # ... so the larger of the two ends up at `end`.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    # Sort everything except the now-final last element.
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
79
0
"""simple docstring""" A : int = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
57
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = 
self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that 
outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) 
def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
79
0
'''simple docstring''' import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def lowerCamelCase ( __lowerCamelCase : Tuple ) ->Tuple: _SCREAMING_SNAKE_CASE = fname.split(os.path.sep )[-1] return re.search(R"""^(.*)_\d+\.jpg$""" , __lowerCamelCase ).groups()[0] class a_ ( snake_case_ ): '''simple docstring''' def __init__( self , A , A=None , A=None ) -> int: _SCREAMING_SNAKE_CASE = file_names _SCREAMING_SNAKE_CASE = image_transform _SCREAMING_SNAKE_CASE = label_to_id def __len__( self ) -> Optional[Any]: return len(self.file_names ) def __getitem__( self , A ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.file_names[idx] _SCREAMING_SNAKE_CASE = PIL.Image.open(A ) _SCREAMING_SNAKE_CASE = raw_image.convert("""RGB""" ) if self.image_transform is not None: _SCREAMING_SNAKE_CASE = self.image_transform(A ) _SCREAMING_SNAKE_CASE = extract_label(A ) if self.label_to_id is not None: _SCREAMING_SNAKE_CASE = self.label_to_id[label] return {"image": image, "label": label} def lowerCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple ) ->str: # Initialize accelerator if args.with_tracking: _SCREAMING_SNAKE_CASE = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: _SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE = config["""lr"""] _SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE = config["""image_size"""] if not isinstance(__lowerCamelCase , (list, tuple) ): 
_SCREAMING_SNAKE_CASE = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , """isdigit""" ): if args.checkpointing_steps == "epoch": _SCREAMING_SNAKE_CASE = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): _SCREAMING_SNAKE_CASE = int(args.checkpointing_steps ) else: raise ValueError( F'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' ) else: _SCREAMING_SNAKE_CASE = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: _SCREAMING_SNAKE_CASE = os.path.split(__lowerCamelCase )[-1].split(""".""" )[0] accelerator.init_trackers(__lowerCamelCase , __lowerCamelCase ) # Grab all the image filenames _SCREAMING_SNAKE_CASE = [os.path.join(args.data_dir , __lowerCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )] # Build the label correspondences _SCREAMING_SNAKE_CASE = [extract_label(__lowerCamelCase ) for fname in file_names] _SCREAMING_SNAKE_CASE = list(set(__lowerCamelCase ) ) id_to_label.sort() _SCREAMING_SNAKE_CASE = {lbl: i for i, lbl in enumerate(__lowerCamelCase )} # Set the seed before splitting the data. 
np.random.seed(__lowerCamelCase ) torch.manual_seed(__lowerCamelCase ) torch.cuda.manual_seed_all(__lowerCamelCase ) # Split our filenames between train and validation _SCREAMING_SNAKE_CASE = np.random.permutation(len(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE = int(0.8 * len(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE = random_perm[:cut] _SCREAMING_SNAKE_CASE = random_perm[cut:] # For training we use a simple RandomResizedCrop _SCREAMING_SNAKE_CASE = Compose([RandomResizedCrop(__lowerCamelCase , scale=(0.5, 1.0) ), ToTensor()] ) _SCREAMING_SNAKE_CASE = PetsDataset( [file_names[i] for i in train_split] , image_transform=__lowerCamelCase , label_to_id=__lowerCamelCase ) # For evaluation, we use a deterministic Resize _SCREAMING_SNAKE_CASE = Compose([Resize(__lowerCamelCase ), ToTensor()] ) _SCREAMING_SNAKE_CASE = PetsDataset([file_names[i] for i in eval_split] , image_transform=__lowerCamelCase , label_to_id=__lowerCamelCase ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 ) _SCREAMING_SNAKE_CASE = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE = create_model("""resnet50d""" , pretrained=__lowerCamelCase , num_classes=len(__lowerCamelCase ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): _SCREAMING_SNAKE_CASE = False for param in model.get_classifier().parameters(): _SCREAMING_SNAKE_CASE = True # We normalize the batches of images to be a bit faster. _SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device ) _SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler _SCREAMING_SNAKE_CASE = OneCycleLR(optimizer=__lowerCamelCase , max_lr=__lowerCamelCase , epochs=__lowerCamelCase , steps_per_epoch=len(__lowerCamelCase ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # We need to keep track of how many total steps we have iterated over _SCREAMING_SNAKE_CASE = 0 # We also need to keep track of the starting epoch so files are named properly _SCREAMING_SNAKE_CASE = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F'Resumed from checkpoint: {args.resume_from_checkpoint}' ) accelerator.load_state(args.resume_from_checkpoint ) _SCREAMING_SNAKE_CASE = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint _SCREAMING_SNAKE_CASE = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) _SCREAMING_SNAKE_CASE = dirs[-1] # Sorts folders by date modified, 
most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` _SCREAMING_SNAKE_CASE = os.path.splitext(__lowerCamelCase )[0] if "epoch" in training_difference: _SCREAMING_SNAKE_CASE = int(training_difference.replace("""epoch_""" , """""" ) ) + 1 _SCREAMING_SNAKE_CASE = None else: _SCREAMING_SNAKE_CASE = int(training_difference.replace("""step_""" , """""" ) ) _SCREAMING_SNAKE_CASE = resume_step // len(__lowerCamelCase ) resume_step -= starting_epoch * len(__lowerCamelCase ) # Now we train the model for epoch in range(__lowerCamelCase , __lowerCamelCase ): model.train() if args.with_tracking: _SCREAMING_SNAKE_CASE = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step _SCREAMING_SNAKE_CASE = accelerator.skip_first_batches(__lowerCamelCase , __lowerCamelCase ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader _SCREAMING_SNAKE_CASE = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. 
_SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()} _SCREAMING_SNAKE_CASE = (batch["""image"""] - mean) / std _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = torch.nn.functional.cross_entropy(__lowerCamelCase , batch["""label"""] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__lowerCamelCase , __lowerCamelCase ): _SCREAMING_SNAKE_CASE = F'step_{overall_step}' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: _SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , __lowerCamelCase ) accelerator.save_state(__lowerCamelCase ) model.eval() _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. _SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()} _SCREAMING_SNAKE_CASE = (batch["""image"""] - mean) / std with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = outputs.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""label"""]) ) _SCREAMING_SNAKE_CASE = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() _SCREAMING_SNAKE_CASE = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(F'epoch {epoch}: {100 * eval_metric:.2f}' ) if args.with_tracking: accelerator.log( { """accuracy""": 100 * eval_metric, """train_loss""": total_loss.item() / len(__lowerCamelCase ), """epoch""": epoch, } , step=__lowerCamelCase , ) if checkpointing_steps == "epoch": _SCREAMING_SNAKE_CASE = F'epoch_{epoch}' if args.output_dir is not None: _SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , __lowerCamelCase ) accelerator.save_state(__lowerCamelCase ) if args.with_tracking: accelerator.end_training() def lowerCamelCase ( ) ->int: _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument("""--data_dir""" , required=__lowerCamelCase , help="""The data folder on disk.""" ) parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" ) parser.add_argument( """--mixed_precision""" , type=__lowerCamelCase , default=__lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--checkpointing_steps""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , ) parser.add_argument( """--output_dir""" , type=__lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=__lowerCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) _SCREAMING_SNAKE_CASE = parser.parse_args() _SCREAMING_SNAKE_CASE = {"""lr""": 3e-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224} training_function(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
58
# NOTE(review): this is the TF RoFormer test module, mangled by an automated
# identifier renamer: every method is named `lowerCAmelCase`, every parameter
# is `__UpperCAmelCase` (duplicate argument names are a SyntaxError in every
# multi-parameter def below), every local is rebound to `_A`, and the bodies
# still reference the ORIGINAL local names (`input_ids`, `config_and_inputs`,
# `model`, ...), which are now undefined. The code is kept byte-identical
# below and must be restored from the upstream transformers test file
# (tests/models/roformer/test_modeling_tf_roformer.py) before it can run.
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


# NOTE(review): model-tester helper (originally `TFRoFormerModelTester`);
# builds a tiny RoFormer config plus random inputs and shape-checks each head.
class _UpperCAmelCase :
    """simple docstring"""

    # NOTE(review): duplicate `__UpperCAmelCase` parameters -> SyntaxError; the
    # constructor ignores its arguments and hard-codes the tiny-model values.
    def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ):
        '''simple docstring'''
        # NOTE(review): every assignment below was `self.<attr> = <value>`
        # before the renamer collapsed the targets to `_A`.
        _A = parent
        _A = 13
        _A = 7
        _A = True
        _A = True
        _A = True
        _A = True
        _A = 99
        _A = 32
        _A = 2
        _A = 4
        _A = 37
        _A = "gelu"
        _A = 0.1
        _A = 0.1
        _A = 512
        _A = 16
        _A = 2
        _A = 0.02
        _A = 3
        _A = 4
        _A = None

    # NOTE(review): originally `prepare_config_and_inputs`.
    def lowerCAmelCase ( self : Dict ):
        '''simple docstring'''
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = None
        if self.use_input_mask:
            _A = random_attention_mask([self.batch_size, self.seq_length] )
        _A = None
        if self.use_token_type_ids:
            _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _A = None
        _A = None
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A = ids_tensor([self.batch_size] , self.num_choices )
        _A = RoFormerConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            initializer_range=self.initializer_range ,
            return_dict=__UpperCAmelCase ,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # NOTE(review): originally `create_and_check_model`.
    def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ):
        '''simple docstring'''
        _A = TFRoFormerModel(config=__UpperCAmelCase )
        _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        _A = [input_ids, input_mask]
        _A = model(__UpperCAmelCase )
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # NOTE(review): originally `create_and_check_lm_head` (causal LM).
    def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ):
        '''simple docstring'''
        _A = True
        _A = TFRoFormerForCausalLM(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    # NOTE(review): originally `create_and_check_for_masked_lm`.
    def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ):
        '''simple docstring'''
        _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # NOTE(review): originally `create_and_check_for_sequence_classification`.
    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ):
        '''simple docstring'''
        _A = self.num_labels
        _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # NOTE(review): originally `create_and_check_for_multiple_choice`.
    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ):
        '''simple docstring'''
        _A = self.num_choices
        _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase )
        _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # NOTE(review): originally `create_and_check_for_token_classification`.
    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        _A = self.num_labels
        _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # NOTE(review): originally `create_and_check_for_question_answering`.
    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ):
        '''simple docstring'''
        _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # NOTE(review): originally `prepare_config_and_inputs_for_common`.
    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        _A = self.prepare_config_and_inputs()
        (
            (
                _A
            ) ,
            (
                _A
            ) ,
            (
                _A
            ) ,
            (
                _A
            ) ,
            (
                _A
            ) ,
            (
                _A
            ) ,
            (
                _A
            ) ,
        ) = config_and_inputs
        _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# NOTE(review): originally `TFRoFormerModelTest`; the `snake_case_` base
# classes were `TFModelTesterMixin` and `PipelineTesterMixin` (now undefined).
@require_tf
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
    """simple docstring"""

    snake_case = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    snake_case = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    snake_case = False
    snake_case = False

    # NOTE(review): originally `is_pipeline_test_to_skip`.
    def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    # NOTE(review): originally `setUp`; `TFRoFormerModelTester` is undefined
    # here because the class above was renamed.
    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        _A = TFRoFormerModelTester(self )
        _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )

    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Optional[Any] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )

    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )

    def lowerCAmelCase ( self : str ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )

    @slow
    def lowerCAmelCase ( self : Dict ):
        '''simple docstring'''
        _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(__UpperCAmelCase )


# NOTE(review): integration test against the released Chinese RoFormer
# checkpoint (originally `TFRoFormerModelIntegrationTest`).
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        _A = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _A = model(__UpperCAmelCase )[0]
        # TODO Replace vocab size
        _A = 50000
        _A = [1, 6, vocab_size]
        self.assertEqual(output.shape , __UpperCAmelCase )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        _A = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )


# NOTE(review): unit tests for the sinusoidal positional embedding table.
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    snake_case = 1E-4

    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        _A = tf.constant([[4, 10]] )
        _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        _A = emba(input_ids.shape )
        _A = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance )

    def lowerCAmelCase ( self : Tuple ):
        '''simple docstring'''
        _A = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        _A = emba.weight[:3, :5]
        tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance )


# NOTE(review): unit test for applying rotary position embeddings inside
# self-attention (query/key rotation against precomputed expectations).
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    snake_case = 1E-4

    def lowerCAmelCase ( self : str ):
        '''simple docstring'''
        _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        _A = embed_positions([2, 16, 768] )[None, None, :, :]
        _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        _A = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        _A = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
79
0
# NOTE(review): OpenFold atom14<->atom37 mask builders, damaged by a renamer:
# both functions are named `UpperCamelCase` (the second def shadows the
# first), the `protein`/`batch` parameters became `__lowerCamelCase`, and all
# locals were rebound to `snake_case` while the bodies still reference the
# ORIGINAL names (`restype_atomaa_to_atomaa_list`, `protein`, `batch`, `out`),
# which are undefined here. Code kept byte-identical; restore from upstream
# `make_atom14_masks` / `make_atom14_masks_np` before use.
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def UpperCamelCase ( __lowerCamelCase : Dict[str, torch.Tensor] ):
    # Per-residue-type index maps between the packed 14-atom and the full
    # 37-atom representations, plus existence masks.
    snake_case : List[str] = []
    snake_case : Optional[int] = []
    snake_case : Any = []
    for rt in rc.restypes:
        snake_case : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        snake_case : str = {name: i for i, name in enumerate(__lowerCamelCase )}
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14 )
    restype_atomaa_to_atomaa_list.append([0] * 37 )
    restype_atomaa_mask_list.append([0.0] * 14 )
    snake_case : Optional[Any] = torch.tensor(
        __lowerCamelCase ,
        dtype=torch.intaa ,
        device=protein["aatype"].device ,
    )
    snake_case : List[Any] = torch.tensor(
        __lowerCamelCase ,
        dtype=torch.intaa ,
        device=protein["aatype"].device ,
    )
    snake_case : int = torch.tensor(
        __lowerCamelCase ,
        dtype=torch.floataa ,
        device=protein["aatype"].device ,
    )
    snake_case : int = protein["aatype"].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    snake_case : List[Any] = restype_atomaa_to_atomaa[protein_aatype]
    snake_case : str = restype_atomaa_mask[protein_aatype]
    snake_case : str = residx_atomaa_mask
    snake_case : Any = residx_atomaa_to_atomaa.long()
    # create the gather indices for mapping back
    snake_case : List[str] = restype_atomaa_to_atomaa[protein_aatype]
    snake_case : List[Any] = residx_atomaa_to_atomaa.long()
    # create the corresponding mask
    snake_case : Union[str, Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        snake_case : Optional[int] = rc.restype_atoa[restype_letter]
        snake_case : Any = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            snake_case : List[Any] = rc.atom_order[atom_name]
            snake_case : Optional[Any] = 1
    snake_case : List[Any] = restype_atomaa_mask[protein_aatype]
    snake_case : int = residx_atomaa_mask
    return protein


def UpperCamelCase ( __lowerCamelCase : Dict[str, torch.Tensor] ):
    # NOTE(review): numpy wrapper — lifts ndarray entries to tensors, runs the
    # torch variant (`make_atomaa_masks`, undefined after the rename), and
    # maps results back to numpy.
    snake_case : Dict = tree_map(lambda __lowerCamelCase : torch.tensor(__lowerCamelCase , device=batch["aatype"].device ) , __lowerCamelCase , np.ndarray )
    snake_case : List[str] = tensor_tree_map(lambda __lowerCamelCase : np.array(__lowerCamelCase ) , make_atomaa_masks(__lowerCamelCase ) )
    return out
59
# NOTE(review): GPT-NeoX configuration class, damaged by a renamer: every
# `__init__` parameter is `__UpperCAmelCase` (duplicate argument names are a
# SyntaxError), `lowerCamelCase_` is assigned twice at module level (the
# pretrained-config map clobbers the logger), and every `self.<attr> = ...`
# became `_A = ...` while later code still reads the real attribute names.
# Kept byte-identical; restore from upstream `configuration_gpt_neox.py`.
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


# NOTE(review): originally `GPTNeoXConfig`; the `snake_case_` base was
# `PretrainedConfig` (imported above).
class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = '''gpt_neox'''

    # NOTE(review): defaults mirror the 20B checkpoint (50432 vocab, 6144
    # hidden, 44 layers, 64 heads, rotary pct 0.25, base 10000, 2048 ctx).
    def __init__( self : List[Any] , __UpperCAmelCase : List[Any]=50432 , __UpperCAmelCase : Any=6144 , __UpperCAmelCase : List[str]=44 , __UpperCAmelCase : List[Any]=64 , __UpperCAmelCase : List[str]=24576 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Tuple=0.25 , __UpperCAmelCase : Optional[Any]=10000 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Tuple=2048 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Union[str, Any]=1E-5 , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : str=True , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Tuple , ):
        '''simple docstring'''
        super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
        # NOTE(review): each `_A = <name>` below was `self.<name> = <name>`.
        _A = vocab_size
        _A = max_position_embeddings
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = intermediate_size
        _A = hidden_act
        _A = rotary_pct
        _A = rotary_emb_base
        _A = attention_dropout
        _A = hidden_dropout
        _A = classifier_dropout
        _A = initializer_range
        _A = layer_norm_eps
        _A = use_cache
        _A = tie_word_embeddings
        _A = use_parallel_residual
        _A = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!" )

    # NOTE(review): originally `_rope_scaling_validation` — validates that
    # `rope_scaling` is a two-field dict with a known type and factor > 1.
    def lowerCAmelCase ( self : Dict ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f'''got {self.rope_scaling}''' )
        _A = self.rope_scaling.get("type" , __UpperCAmelCase )
        _A = self.rope_scaling.get("factor" , __UpperCAmelCase )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
79
0
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray ): return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(_snake_case , _snake_case ) ) ) def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray ): if dataset.ndim != value_array.ndim: lowerCAmelCase : List[Any] = ( '''Wrong input data\'s dimensions... ''' f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(_snake_case ) try: if dataset.shape[1] != value_array.shape[1]: lowerCAmelCase : Dict = ( '''Wrong input data\'s shape... ''' f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(_snake_case ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('''Wrong shape''' ) if dataset.dtype != value_array.dtype: lowerCAmelCase : Optional[Any] = ( '''Input data have different datatype... ''' f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(_snake_case ) lowerCAmelCase : str = [] for value in value_array: lowerCAmelCase : int = euclidean(_snake_case , dataset[0] ) lowerCAmelCase : Union[str, Any] = dataset[0].tolist() for dataset_value in dataset[1:]: lowerCAmelCase : Any = euclidean(_snake_case , _snake_case ) if dist > temp_dist: lowerCAmelCase : List[Any] = temp_dist lowerCAmelCase : Tuple = dataset_value.tolist() answer.append([vector, dist] ) return answer def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray ): return np.dot(_snake_case , _snake_case ) / (norm(_snake_case ) * norm(_snake_case )) if __name__ == "__main__": import doctest doctest.testmod()
60
'''Adjust the contrast of a PIL image.

NOTE(review): reconstructed — the original def had duplicate parameter names
(`def __lowercase(__lowercase, __lowercase)`, a SyntaxError) while its body
still referenced the intended names (`level`, `factor`, `c`), and the
`__main__` block assigned one name but used `cont_img`.
'''
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of ``img`` with contrast scaled by ``level``.

    ``level`` follows the standard -255..255 contrast convention: 0 leaves
    every pixel unchanged (factor == 1.0), positive values increase contrast.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Map each channel value around the mid-point 128.
        return int(128 + factor * (c - 128))

    # Image.point applies `contrast` to every pixel channel.
    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
79
0
"""simple docstring""" import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def __a ( __lowerCamelCase ): UpperCAmelCase_ : int = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = emb.weight.shape UpperCAmelCase_ : List[Any] = nn.Linear(__lowerCamelCase, __lowerCamelCase, bias=__lowerCamelCase ) UpperCAmelCase_ : int = emb.weight.data return lin_layer def __a ( __lowerCamelCase ): UpperCAmelCase_ : List[str] = torch.load(__lowerCamelCase, map_location="cpu" ) UpperCAmelCase_ : Optional[int] = mam_aaa["args"] or mam_aaa["cfg"]["model"] UpperCAmelCase_ : Union[str, Any] = mam_aaa["model"] remove_ignore_keys_(__lowerCamelCase ) UpperCAmelCase_ : Dict = state_dict["encoder.embed_tokens.weight"].shape[0] UpperCAmelCase_ : Union[str, Any] = MaMaaaConfig( vocab_size=__lowerCamelCase, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", ) UpperCAmelCase_ : str = state_dict["decoder.embed_tokens.weight"] UpperCAmelCase_ : Any = MaMaaaForConditionalGeneration(__lowerCamelCase ) model.model.load_state_dict(__lowerCamelCase, strict=__lowerCamelCase ) UpperCAmelCase_ : Dict = 
make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _a = parser.parse_args() _a = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
61
'''Sylvester's sequence: a(1) = 2, a(n) = a(n-1)^2 - a(n-1) + 1.

NOTE(review): reconstructed — the original def was renamed to `__lowercase`
while its body still referenced `number` and called `sylvester` recursively
(both undefined), and the isinstance check compared `number` against itself.
'''


def sylvester(number: int) -> int:
    """Return the ``number``-th term (1-indexed) of Sylvester's sequence.

    The sequence starts 2, 3, 7, 43, 1807, ...

    Raises:
        ValueError: if ``number`` is smaller than 1.
    """
    # Fix: was `isinstance(number, number)` after the mechanical rename.
    assert isinstance(number, int), f'''The input value of [n={number}] is not an integer'''

    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg)
    else:
        # a(n) = (a(n-1) - 1) * a(n-1) + 1
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
79
0
"""Parse raw DPR biencoder training data into RAG evaluation files.

NOTE(review): reconstructed — the function was renamed to `_UpperCAmelCase`
while the `__main__` guard still called `main()`, all locals were collapsed
to `__UpperCamelCase`, and `type=SCREAMING_SNAKE_CASE__` stood in for
`type=str`.
"""
import argparse
import json

from tqdm import tqdm


def main():
    """Read DPR records and write one question per line plus gold titles.

    Writes ``--evaluation_set`` (questions) and ``--gold_data_path``
    (tab-separated positive-context titles), one line per DPR record.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path',
        type=str,
        default='biencoder-nq-dev.json',
        help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set',
        type=str,
        help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path',
        type=str,
        help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
62
# NOTE(review): GPT-2 attention-head masking/pruning script (bertology-style),
# damaged by a renamer: ALL six functions are named `__lowercase` (each def
# shadows the previous one), several defs repeat `__lowercase` as a parameter
# name (a SyntaxError), every local is rebound to `_A` while bodies still
# reference the ORIGINAL names (`model`, `args`, `loss`, `head_mask`, ...),
# and `__main__` calls an undefined `main()`. Kept byte-identical; restore
# from the upstream research script before use.
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPTaLMHeadModel


lowerCamelCase_ = logging.getLogger(__name__)


# NOTE(review): originally `save_model(model, dirpath)` — clears any stale
# config/weights in the target directory, then saves the model.
def __lowercase ( __lowercase , __lowercase ) -> Optional[int]:
    '''simple docstring'''
    if os.path.exists(__lowercase ):
        if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile(
            os.path.join(__lowercase , "config.json" ) ):
            os.remove(os.path.join(__lowercase , "config.json" ) )
        if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile(
            os.path.join(__lowercase , "pytorch_model.bin" ) ):
            os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) )
    else:
        os.makedirs(__lowercase )
    model.save_pretrained(__lowercase )


# NOTE(review): originally `entropy(p, unlogit=False)` — Shannon entropy along
# the last dim; the bare `_A = 0` was `plogp[p == 0] = 0` before the rename.
def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]:
    '''simple docstring'''
    _A = 2
    if unlogit:
        _A = torch.pow(__lowercase , __lowercase )
    _A = p * torch.log(__lowercase )
    _A = 0
    return -plogp.sum(dim=-1 )


# NOTE(review): originally `print_2d_tensor(tensor)` — logs a 2D tensor row by
# row, integer or float formatted depending on dtype.
def __lowercase ( __lowercase ) -> Optional[Any]:
    '''simple docstring'''
    logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) )
    for row in range(len(__lowercase ) ):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )


# NOTE(review): originally `compute_heads_importance(args, model, eval_dataloader,
# compute_entropy=True, compute_importance=True, head_mask=None,
# actually_pruned=False)` — accumulates per-head attention entropy and
# gradient-based head importance over the dataloader.
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int:
    '''simple docstring'''
    _A , _A = model.config.num_hidden_layers, model.config.num_attention_heads
    _A = torch.zeros(__lowercase , __lowercase ).to(args.device )
    _A = torch.zeros(__lowercase , __lowercase ).to(args.device )

    if head_mask is None:
        _A = torch.ones(__lowercase , __lowercase ).to(args.device )

    head_mask.requires_grad_(requires_grad=__lowercase )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        _A = None

    _A = 0.0
    _A = 0.0
    for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
        _A = tuple(t.to(args.device ) for t in inputs )
        ((_A) , ) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        _A = model(__lowercase , labels=__lowercase , head_mask=__lowercase )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        _A , _A , _A = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(__lowercase ):
                _A = entropy(attn.detach() , __lowercase )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        _A = 2
        _A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20

    if not args.dont_normalize_global_importance:
        _A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies" )
        print_ad_tensor(__lowercase )
    if compute_importance:
        logger.info("Head importance scores" )
        print_ad_tensor(__lowercase )
    logger.info("Head ranked by importance scores" )
    _A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    _A = torch.arange(
        head_importance.numel() , device=args.device )
    _A = head_ranks.view_as(__lowercase )
    print_ad_tensor(__lowercase )
    return attn_entropy, head_importance, total_loss


# NOTE(review): originally `mask_heads(args, model, eval_dataloader)` —
# iteratively zeroes the least-important heads until the (1/loss) score drops
# below `masking_threshold` * original score; saves the final mask.
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]:
    '''simple docstring'''
    _A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase )
    _A = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold )

    _A = torch.ones_like(__lowercase )
    _A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )

    _A = original_score
    while current_score >= original_score * args.masking_threshold:
        _A = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        _A = float("Inf" )
        _A = head_importance.view(-1 ).sort()[1]

        if len(__lowercase ) <= num_to_mask:
            print("BREAK BY num_to_mask" )
            break

        # mask heads
        _A = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
        _A = new_head_mask.view(-1 )
        _A = 0.0
        _A = new_head_mask.view_as(__lowercase )
        _A = new_head_mask.clone().detach()
        print_ad_tensor(__lowercase )

        # Compute metric and head importance again
        _A , _A , _A = compute_heads_importance(
            __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase )
        _A = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)" ,
            __lowercase ,
            new_head_mask.sum() ,
            new_head_mask.sum() / new_head_mask.numel() * 100 ,
        )

    logger.info("Final head mask" )
    print_ad_tensor(__lowercase )
    np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )

    return head_mask


# NOTE(review): originally `prune_heads(args, model, eval_dataloader,
# head_mask)` — physically removes masked heads, then re-scores and compares
# parameter count and timing against the masked-only model.
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
    '''simple docstring'''
    _A = datetime.now()
    _A , _A , _A = compute_heads_importance(
        __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase )
    _A = 1 / loss
    _A = datetime.now() - before_time

    _A = sum(p.numel() for p in model.parameters() )
    _A = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) )
    }

    for k, v in heads_to_prune.items():
        if isinstance(__lowercase , __lowercase ):
            _A = [
                v,
            ]

    assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(__lowercase )
    _A = sum(p.numel() for p in model.parameters() )

    _A = datetime.now()
    _A , _A , _A = compute_heads_importance(
        __lowercase ,
        __lowercase ,
        __lowercase ,
        compute_entropy=__lowercase ,
        compute_importance=__lowercase ,
        head_mask=__lowercase ,
        actually_pruned=__lowercase ,
    )

    _A = 1 / loss
    _A = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" ,
        __lowercase ,
        __lowercase ,
        pruned_num_params / original_num_params * 100 ,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase )
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
    save_model(__lowercase , args.output_dir )


# NOTE(review): originally `main()` — CLI entry point: parses args, sets up
# device/distributed training, loads GPT-2, builds the dataset, computes head
# importance, and optionally masks/prunes heads.
def __lowercase ( ) -> Union[str, Any]:
    '''simple docstring'''
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" ,
        default=__lowercase ,
        type=__lowercase ,
        required=__lowercase ,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task." ,
    )
    parser.add_argument(
        "--model_name_or_path" ,
        default=__lowercase ,
        type=__lowercase ,
        required=__lowercase ,
        help="Path to pretrained model or model identifier from huggingface.co/models" ,
    )
    parser.add_argument(
        "--output_dir" ,
        default=__lowercase ,
        type=__lowercase ,
        required=__lowercase ,
        help="The output directory where the model predictions and checkpoints will be written." ,
    )
    # Other parameters
    parser.add_argument(
        "--config_name" ,
        default="" ,
        type=__lowercase ,
        help="Pretrained config name or path if not the same as model_name_or_path" ,
    )
    parser.add_argument(
        "--tokenizer_name" ,
        default="" ,
        type=__lowercase ,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path" ,
    )
    parser.add_argument(
        "--cache_dir" ,
        default=__lowercase ,
        type=__lowercase ,
        help="Where do you want to store the pre-trained models downloaded from s3" ,
    )
    parser.add_argument(
        "--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )

    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance" ,
        action="store_true" ,
        help="Don't normalize all importance scores between 0 and 1" ,
    )

    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold" ,
        default=0.9 ,
        type=__lowercase ,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." ,
    )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." )

    parser.add_argument(
        "--max_seq_length" ,
        default=128 ,
        type=__lowercase ,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) ,
    )
    parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." )

    parser.add_argument("--seed" , type=__lowercase , default=42 )
    parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." )
    _A = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase )
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        _A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
        _A = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        _A = torch.device("cuda" , args.local_rank )
        _A = 1
        torch.distributed.init_process_group(backend="nccl" )  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )

    _A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )

    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        _A = nn.parallel.DistributedDataParallel(
            __lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase )
    elif args.n_gpu > 1:
        _A = nn.DataParallel(__lowercase )

    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=__lowercase )
    torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) )
    logger.info("Training/evaluation parameters %s" , __lowercase )

    # Prepare dataset
    _A = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    _A = (torch.from_numpy(__lowercase ),)
    _A = TensorDataset(*__lowercase )
    _A = RandomSampler(__lowercase )
    _A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size )

    # Compute head entropy and importance score
    compute_heads_importance(__lowercase , __lowercase , __lowercase )

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        _A = mask_heads(__lowercase , __lowercase , __lowercase )
        prune_heads(__lowercase , __lowercase , __lowercase , __lowercase )


if __name__ == "__main__":
    main()
79
0
'''simple docstring''' import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowerCAmelCase_ : Any = data_utils.TransfoXLTokenizer lowerCAmelCase_ : str = data_utils.TransfoXLCorpus lowerCAmelCase_ : Union[str, Any] = data_utils lowerCAmelCase_ : List[str] = data_utils def _lowerCamelCase ( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Dict , lowercase : Dict ) -> Tuple: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowercase , "rb" ) as fp: _a = pickle.load(lowercase , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) _a = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F'Save vocabulary to {pytorch_vocab_dump_path}' ) _a = corpus.vocab.__dict__ torch.save(lowercase , lowercase ) _a = corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , lowercase ) _a = pytorch_dump_folder_path + "/" + CORPUS_NAME print(F'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(lowercase , lowercase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model _a = os.path.abspath(lowercase ) _a = os.path.abspath(lowercase ) print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' 
) # Initialise PyTorch model if transfo_xl_config_file == "": _a = TransfoXLConfig() else: _a = TransfoXLConfig.from_json_file(lowercase ) print(F'Building PyTorch model from configuration: {config}' ) _a = TransfoXLLMHeadModel(lowercase ) _a = load_tf_weights_in_transfo_xl(lowercase , lowercase , lowercase ) # Save pytorch-model _a = os.path.join(lowercase , lowercase ) _a = os.path.join(lowercase , lowercase ) print(F'Save PyTorch model to {os.path.abspath(lowercase )}' ) torch.save(model.state_dict() , lowercase ) print(F'Save configuration file to {os.path.abspath(lowercase )}' ) with open(lowercase , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase_ : str = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.', ) parser.add_argument( '--tf_checkpoint_path', default='', type=str, help='An optional path to a TensorFlow checkpoint path to be converted.', ) parser.add_argument( '--transfo_xl_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--transfo_xl_dataset_file', default='', type=str, help='An optional dataset file to be converted in a vocabulary.', ) lowerCAmelCase_ : Optional[Any] = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
63
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = CycleDiffusionPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) 
torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A = CLIPTextModel(__UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ): '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _A = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = 
self.get_dummy_components() for name, module in components.items(): if hasattr(__UpperCAmelCase , "half" ): _A = module.half() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Any ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained( 
__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
79
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': 
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), 
'''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } A_ = { '''bert-base-uncased''': 5_12, '''bert-large-uncased''': 5_12, '''bert-base-cased''': 5_12, '''bert-large-cased''': 5_12, '''bert-base-multilingual-uncased''': 5_12, '''bert-base-multilingual-cased''': 5_12, '''bert-base-chinese''': 5_12, '''bert-base-german-cased''': 5_12, '''bert-large-uncased-whole-word-masking''': 5_12, '''bert-large-cased-whole-word-masking''': 5_12, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12, '''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12, '''bert-base-cased-finetuned-mrpc''': 5_12, '''bert-base-german-dbmdz-cased''': 5_12, '''bert-base-german-dbmdz-uncased''': 5_12, '''TurkuNLP/bert-base-finnish-cased-v1''': 5_12, '''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12, '''wietsedv/bert-base-dutch-cased''': 5_12, } A_ = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, 
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False}, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = BertTokenizer def __init__( self: int, a_: Any=None, a_: Tuple=None, a_: Tuple=True, a_: Union[str, Any]="[UNK]", a_: List[Any]="[SEP]", a_: Union[str, Any]="[PAD]", a_: List[str]="[CLS]", a_: Union[str, Any]="[MASK]", a_: List[str]=True, a_: List[str]=None, **a_: List[Any], ): '''simple docstring''' super().__init__( a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, ) _snake_case : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""", a_ ) != do_lower_case or normalizer_state.get("""strip_accents""", a_ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""", a_ ) != 
tokenize_chinese_chars ): _snake_case : Optional[int] = getattr(a_, normalizer_state.pop("""type""" ) ) _snake_case : Optional[Any] = do_lower_case _snake_case : Optional[int] = strip_accents _snake_case : int = tokenize_chinese_chars _snake_case : Union[str, Any] = normalizer_class(**a_ ) _snake_case : str = do_lower_case def UpperCamelCase_ ( self: Dict, a_: Dict, a_: str=None ): '''simple docstring''' _snake_case : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self: Any, a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : Dict = [self.sep_token_id] _snake_case : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self: Union[str, Any], a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : List[Any] = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ )
64
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() 
except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
0
"""Consolidate a pretrained question encoder and generator into a single RAG checkpoint."""
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """Build a RAG model from its two sub-models and save it, plus tokenizers, to ``dest_dir``.

    The original defined this under an obfuscated name while the ``__main__``
    block called ``consolidate`` — a NameError at runtime; the name is restored
    to match the caller.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check: the consolidated checkpoint must load back.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
65
"""Mock download manager that serves local "dummy data" archives for dataset tests."""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class _UpperCAmelCase:
    """Download-manager stand-in that resolves URLs to files inside a dummy_data.zip.

    Method and attribute names are restored to the distinct identifiers the
    bodies themselves reference (e.g. ``self.dummy_file_name``,
    ``self.datasets_scripts_dir``); in the obfuscated original every method was
    named identically and clobbered the previous one.
    """

    # Name of the extracted dummy-data directory inside the zip.
    dummy_file_name = "dummy_data"
    # Root directory of the dataset scripts (for local dummy data).
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config,
        version,
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # Download lazily on first access and cache the result.
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch (or locate) the dummy zip, extract it, and return the extracted path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        # NOTE(review): the two boolean flags were obfuscated in the original;
        # True/True matches the extract-always semantics the callers rely on — confirm.
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map a URL / list / dict of URLs to paths inside the dummy data."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        # The custom download callable is ignored: dummy data is served as-is.
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # Dummy data is already extracted; extraction is a no-op.
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative posix path, opened file) for every member under `path`."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield file paths, walking directories deterministically and skipping hidden/dunder names."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
79
0
"""Tests for the Swin Transformer V2 (Swinv2) model family.

Identifiers are restored from in-body references (e.g. ``setUp`` constructs
``SwinvaModelTester``); in the obfuscated original all methods shared one name
and clobbered each other, and ``__init__`` repeated the same parameter name
(a SyntaxError).
"""
import collections
import inspect
import unittest

from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
    from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class SwinvaModelTester:
    """Builds tiny Swinv2 configs and random inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the four boolean flags below were obfuscated to one name;
    # restored to the conventional mixin flags — confirm against upstream.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )


@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
66
'''simple docstring''' def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Union[str, Any]: '''simple docstring''' if isinstance(__lowercase , __lowercase ) and isinstance(__lowercase , __lowercase ): _A = len(set_a.intersection(__lowercase ) ) if alternative_union: _A = len(__lowercase ) + len(__lowercase ) else: _A = len(set_a.union(__lowercase ) ) return intersection / union if isinstance(__lowercase , (list, tuple) ) and isinstance(__lowercase , (list, tuple) ): _A = [element for element in set_a if element in set_b] if alternative_union: _A = len(__lowercase ) + len(__lowercase ) return len(__lowercase ) / union else: _A = set_a + [element for element in set_b if element not in set_a] return len(__lowercase ) / len(__lowercase ) return len(__lowercase ) / len(__lowercase ) return None if __name__ == "__main__": lowerCamelCase_ = {'''a''', '''b''', '''c''', '''d''', '''e'''} lowerCamelCase_ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''} print(jaccard_similarity(set_a, set_b))
79
0
'''simple docstring''' def __lowerCAmelCase ( UpperCamelCase__ = 1_00_00_00 ) -> int: __lowerCamelCase = set(range(3 , UpperCamelCase__ , 2 ) ) primes.add(2 ) for p in range(3 , UpperCamelCase__ , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , UpperCamelCase__ , UpperCamelCase__ ) ) ) __lowerCamelCase = [float(UpperCamelCase__ ) for n in range(limit + 1 )] for p in primes: for n in range(UpperCamelCase__ , limit + 1 , UpperCamelCase__ ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(f'{solution() = }')
67
"""Tests for accelerate's KwargsHandler dataclasses (GradScalerKwargs, DistributedDataParallelKwargs)."""
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    # Field names restored to a/b/c — the to_kwargs() assertions below read
    # exactly these keys. In the obfuscated original all three fields shared
    # one name, so only the last survived.
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # to_kwargs() reports only the fields that differ from their defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        # NOTE(review): attribute restored from obfuscated `use_fpaa` (digit
        # mangling of fp16) — confirm against the accelerate API.
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this file under torchrun; the __main__ block below does the checks.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
79
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
68
'''simple docstring''' def __lowercase ( __lowercase = 100 ) -> int: '''simple docstring''' _A = n * (n + 1) * (2 * n + 1) / 6 _A = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
79
0
"""simple docstring""" import warnings from functools import wraps from typing import Callable def UpperCAmelCase ( UpperCAmelCase ) -> Callable: @wraps(UpperCAmelCase ) def _inner_fn(*UpperCAmelCase , **UpperCAmelCase ): warnings.warn( (f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , UpperCAmelCase , ) return fn(*UpperCAmelCase , **UpperCAmelCase ) return _inner_fn
69
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. 
If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. 
_A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in 
original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() 
trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
79
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A__ : str ={'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str =['''YolosFeatureExtractor'''] A__ : Optional[Any] =['''YolosImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] =[ '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''YolosForObjectDetection''', '''YolosModel''', '''YolosPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys A__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''canine''' def __init__( self : Dict , __UpperCAmelCase : List[str]=768 , __UpperCAmelCase : str=12 , __UpperCAmelCase : Union[str, Any]=12 , __UpperCAmelCase : int=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : List[Any]=16384 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : Dict=1E-12 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : int=0xE000 , __UpperCAmelCase : List[Any]=0xE001 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : int=16384 , __UpperCAmelCase : Union[str, Any]=128 , **__UpperCAmelCase : Dict , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _A = max_position_embeddings _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = type_vocab_size _A = layer_norm_eps # Character config: _A = downsampling_rate _A = upsampling_kernel_size _A = num_hash_functions _A = num_hash_buckets _A = local_transformer_stride
79
0
import csv import tweepy # Twitter API credentials A_ :int = '''''' A_ :str = '''''' A_ :Dict = '''''' A_ :Optional[int] = '''''' def A ( a_ ) -> None: # authorize twitter, initialize tweepy __UpperCamelCase : str =tweepy.OAuthHandler(a_ ,a_ ) auth.set_access_token(a_ ,a_ ) __UpperCamelCase : Optional[int] =tweepy.API(a_ ) # initialize a list to hold all the tweepy Tweets __UpperCamelCase : Optional[Any] =[] # make initial request for most recent tweets (200 is the maximum allowed count) __UpperCamelCase : str =api.user_timeline(screen_name=a_ ,count=200 ) # save most recent tweets alltweets.extend(a_ ) # save the id of the oldest tweet less one __UpperCamelCase : Any =alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(a_ ) > 0: print(F'getting tweets before {oldest}' ) # all subsequent requests use the max_id param to prevent duplicates __UpperCamelCase : Optional[int] =api.user_timeline( screen_name=a_ ,count=200 ,max_id=a_ ) # save most recent tweets alltweets.extend(a_ ) # update the id of the oldest tweet less one __UpperCamelCase : Optional[Any] =alltweets[-1].id - 1 print(F'...{len(a_ )} tweets downloaded so far' ) # transform the tweepy tweets into a 2D array that will populate the csv __UpperCamelCase : Dict =[[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F'new_{screen_name}_tweets.csv' ,'w' ) as f: __UpperCamelCase : Optional[int] =csv.writer(a_ ) writer.writerow(['id', 'created_at', 'text'] ) writer.writerows(a_ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
71
'''simple docstring''' class _UpperCAmelCase : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : list[int] ): '''simple docstring''' _A = len(__UpperCAmelCase ) _A = [0] * len_array if len_array > 0: _A = array[0] for i in range(1 , __UpperCAmelCase ): _A = self.prefix_sum[i - 1] + array[i] def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ): '''simple docstring''' _A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCAmelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
79
0
"""simple docstring""" import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __snake_case ( _lowercase): snake_case__ : Optional[Any] = (CMStochasticIterativeScheduler,) snake_case__ : Tuple = 1_0 def SCREAMING_SNAKE_CASE ( self : str , **__lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : List[str] = { '''num_train_timesteps''': 2_0_1, '''sigma_min''': 0.0_02, '''sigma_max''': 80.0, } config.update(**__lowerCAmelCase ) return config def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = 1_0 _lowerCamelCase : List[str] = self.get_scheduler_config() _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0](**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = scheduler.timesteps[0] _lowerCamelCase : str = scheduler.timesteps[1] _lowerCamelCase : List[Any] = self.dummy_sample _lowerCamelCase : str = 0.1 * sample _lowerCamelCase : str = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample _lowerCamelCase : Tuple = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : str = 1 scheduler.set_timesteps(__lowerCAmelCase ) 
_lowerCamelCase : Union[str, Any] = scheduler.timesteps _lowerCamelCase : Tuple = torch.manual_seed(0 ) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(__lowerCAmelCase ): # 1. scale model input _lowerCamelCase : Dict = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # 2. predict noise residual _lowerCamelCase : Optional[int] = model(__lowerCAmelCase , __lowerCAmelCase ) # 3. predict previous sample x_t-1 _lowerCamelCase : Any = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample _lowerCamelCase : List[str] = pred_prev_sample _lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : str = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.scheduler_classes[0] _lowerCamelCase : List[str] = self.get_scheduler_config() _lowerCamelCase : str = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : int = [1_0_6, 0] scheduler.set_timesteps(timesteps=__lowerCAmelCase ) _lowerCamelCase : str = scheduler.timesteps _lowerCamelCase : str = torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = self.dummy_model() _lowerCamelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input _lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # 2. predict noise residual _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , __lowerCAmelCase ) # 3. 
predict previous sample x_t-1 _lowerCamelCase : Union[str, Any] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample _lowerCamelCase : List[str] = pred_prev_sample _lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : Tuple = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = [3_9, 3_0, 1_2, 1_5, 0] with self.assertRaises(__lowerCAmelCase , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Any = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : List[Any] = [3_9, 3_0, 1_2, 1, 0] _lowerCamelCase : Optional[Any] = len(__lowerCAmelCase ) with self.assertRaises(__lowerCAmelCase , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Dict = self.get_scheduler_config() _lowerCamelCase : Dict = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : Dict = [scheduler.config.num_train_timesteps] with self.assertRaises( __lowerCAmelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=__lowerCAmelCase )
72
'''simple docstring''' from typing import List import numpy as np def __lowercase ( __lowercase ) -> int: '''simple docstring''' _A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) _A = max(lists_lengths.values() , default=0 ) return max(1 , __lowercase ) def __lowercase ( __lowercase , __lowercase ) -> List[range]: '''simple docstring''' _A = [] for group_idx in range(__lowercase ): _A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _A = range(__lowercase , start + num_shards_to_add ) shards_indices_per_group.append(__lowercase ) return shards_indices_per_group def __lowercase ( __lowercase , __lowercase ) -> List[dict]: '''simple docstring''' _A = _number_of_shards_in_gen_kwargs(__lowercase ) if num_shards == 1: return [dict(__lowercase )] else: _A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__lowercase , __lowercase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__lowercase ) ) ] def __lowercase ( __lowercase ) -> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __lowercase ) else 
gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __lowercase ( __lowercase , __lowercase ) -> dict: '''simple docstring''' _A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )} _A = {} for size in list_sizes: _A = list(range(__lowercase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _A = dict(__lowercase ) for key, value in shuffled_kwargs.items(): if isinstance(__lowercase , __lowercase ): _A = [value[i] for i in indices_per_size[len(__lowercase )]] return shuffled_kwargs
79
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of canonical DPR checkpoint names to their hosted config files.
a = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class A_(PretrainedConfig):
    """Configuration class for the DPR encoders/reader (a BERT-style transformer config).

    Stores the hyper-parameters used to instantiate a DPR model: vocabulary size,
    hidden sizes, attention setup, dropout probabilities and the optional output
    ``projection_dim`` (0 means no extra projection layer).
    """

    # NOTE(review): obfuscated attribute kept for backward compatibility; `model_type`
    # is the name the PretrainedConfig machinery actually reads.
    _UpperCAmelCase = "dpr"
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
73
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''roformer''' def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size if embedding_size is None else embedding_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = rotary_value A = use_cache class 
lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
74
"""VQ-Diffusion text-to-image pipeline with learned classifier-free sampling embeddings."""
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Stores (optionally learnable) unconditional embeddings for classifier-free guidance."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation with VQ-Diffusion.

    Components:
        vqvae: vector-quantized VAE used to decode latents to images.
        text_encoder / tokenizer: CLIP text encoder and its tokenizer.
        transformer: conditioned transformer predicting log-probabilities over codebook entries.
        scheduler: VQ-Diffusion scheduler used to denoise the latent indices.
        learned_classifier_free_sampling_embeddings: unconditional embeddings for guidance.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: TransformeraDModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Encode the prompt(s) into (optionally guidance-doubled) CLIP embeddings."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}'''
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        """Run the denoising loop over latent codebook indices and decode the final image."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt)}''')

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps)}.'''
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The last codebook index is the "masked" class.
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f''' {self.transformer.num_vector_embeds - 1} (inclusive).'''
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) the lowest-probability codebook entries.

        Probabilities are kept, in descending order, until their cumulative sum
        reaches ``truncation_rate``; everything else is set to -inf (= log(0)).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # Undo the sort so the mask lines up with the original class order.
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
79
0
"""Tests for the UnCLIP image-variation pipeline (fast dummy-model tests + slow GPU integration)."""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNetaDConditionModel,
    UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps

from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # First UNet block width matches the time input dim.
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different initial weights than `dummy_super_res_first`
        torch.manual_seed(1)
        model = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            # Map [-1, 1] tensors to [0, 1] and convert to a PIL image.
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            # prepare_latents multiplies the initial noise by this sigma.
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()


@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        # NOTE(review): `torch.floataa` looks like a mangled `torch.float16` (the reference
        # output is an fp16 checkpoint) — confirm before running on GPU.
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.floataa
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
75
"""Convert ViT hybrid checkpoints from the timm library to the HuggingFace format.

Restores this script to a runnable form: the obfuscated placeholder
assignments (`_A = ...`) are re-bound to the variable names that the rest of
the code actually reads (`rename_keys`, `state_dict`, `config`, ...), which
previously raised NameError at runtime. The reconstructed target keys in
`read_in_q_k_v` follow the standard HF ViT conversion layout — confirm
against the upstream conversion script if weights fail to load.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build the (timm key, HF key) rename table for a ViT hybrid checkpoint.

    Args:
        config: a ``ViTHybridConfig`` whose ``backbone_config.depths`` and
            ``num_hidden_layers`` drive the per-stage / per-layer key names.
        base_model: if True, the target keys are for a bare ``ViTHybridModel``
            (no "vit." prefix, pooler instead of classification head).

    Returns:
        list of ``(old_key, new_key)`` tuples.
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        # only the first block of each stage has a downsampling projection
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate query/key/value tensors.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.*`` and writes
    the three HF-style attention keys per layer.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the timm classification head weights (not used by the base model)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm ViT hybrid's weights to our ViT structure.

    Args:
        vit_name: timm model name (e.g. ``vit_base_r50_s16_384``).
        pytorch_dump_folder_path: where to save the converted model, or None.
        push_to_hub: if True, also push model and processor to the hub.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring the timm preprocessing pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
79
0
def lowerCamelCase__(equation1, equation2):
    """Solve a 2x2 linear system with Cramer's rule.

    Each equation is a 3-sequence ``(a, b, c)`` representing ``a*x + b*y = c``.

    Fixes over the previous version: the duplicated parameter names
    (``_a, _a`` — a SyntaxError) are given distinct names, the zero-coefficient
    guard now checks *both* equations instead of comparing the first equation
    against itself, and the coefficient unpacking binds six distinct names.

    Args:
        equation1: coefficients ``(a1, b1, c1)`` of the first equation.
        equation2: coefficients ``(a2, b2, c2)`` of the second equation.

    Returns:
        tuple[float, float]: the unique solution ``(x, y)``, or ``(0.0, 0.0)``
        for the trivial solution when both right-hand determinants vanish.

    Raises:
        ValueError: if an equation does not have exactly 3 coefficients, if
            a and b are zero in both equations, or if the system has no
            unique solution (inconsistent, or infinitely many solutions).
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (both right-hand sides are zero)
        return (0.0, 0.0)
    # Non-trivial, unique solution (consistent system)
    return (determinant_x / determinant, determinant_y / determinant)
76
"""Lazy import structure for the Time Series Transformer model.

Restored from the obfuscated form: the import table is named
`_import_structure` again (the obfuscated version assigned it to a throwaway
name, so `_LazyModule` received an undefined variable), the optional
torch-backed symbols are merged into that table instead of being discarded,
and the `_LazyModule` instance is installed into `sys.modules` so attribute
access on this package triggers the lazy imports.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: the modeling symbols are simply not exposed
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
0
"""simple docstring""" import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") _UpperCamelCase : Any = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : lowerCamelCase__ : Optional[str] = field( default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."}) lowerCamelCase__ : Optional[str] = field( default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , ) lowerCamelCase__ : int = field( default=1_0_2_4 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCamelCase__ : bool = field( default=_a , metadata={"help": "Overwrite the cached preprocessed datasets or not."}) lowerCamelCase__ : bool = field( default=_a , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." 
) } , ) lowerCamelCase__ : Optional[int] = field( default=_a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowerCamelCase__ : Optional[int] = field( default=_a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) lowerCamelCase__ : Optional[int] = field( default=_a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) lowerCamelCase__ : Optional[str] = field( default=_a , metadata={"help": "A csv or a json file containing the training data."}) lowerCamelCase__ : Optional[str] = field( default=_a , metadata={"help": "A csv or a json file containing the validation data."}) lowerCamelCase__ : Optional[str] = field(default=_a , metadata={"help": "A csv or a json file containing the test data."}) def _UpperCAmelCase ( self ) -> Optional[Any]: if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' ) else: lowercase__ : Optional[int] = self.train_file.split('.' )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." lowercase__ : Optional[Any] = self.validation_file.split('.' )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    """Parse arguments, then run TabFact training/evaluation/prediction.

    Restored from the obfuscated form: every local had been assigned to the
    placeholder `lowercase__`, leaving `parser`, `model_args`, `raw_datasets`,
    `trainer`, etc. undefined at their use sites.
    """
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either a hub dataset (tab_fact) or local CSV/JSON files.
    # In distributed training, load_dataset guarantees only one local process
    # downloads the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files: CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer. In distributed training, the
    # .from_pretrained methods guarantee that only one local process downloads.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # Rows are '\n'-separated, cells '#'-separated; first row is the header.
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float64).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
77
"""COMET machine-translation evaluation metric for the `datasets` library.

Restored from the obfuscated form: the checkpoint scorer is assigned back to
``self.scorer`` (previously it was bound to a throwaway local, so ``_compute``
crashed with AttributeError), and the ``datasets.Metric`` hook methods carry
their required names again.
"""
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
  author    = {Rei, Ricardo  and  Stewart, Craig  and  Farinha, Ana C  and  Lavie, Alon},
  title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation},
  month     = {November},
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo  and
      Stewart, Craig  and
      Farinha, Ana C  and
      Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        """Describe the metric: features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Download the COMET checkpoint selected via `config_name` and keep the scorer."""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score each (source, hypothesis, reference) triple with the COMET model."""
        if gpus is None:
            # default to one GPU when available, otherwise CPU
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
79
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available snake_case_ = { """configuration_squeezebert""": [ """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SqueezeBertConfig""", """SqueezeBertOnnxConfig""", ], """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""SqueezeBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """SqueezeBertForMaskedLM""", """SqueezeBertForMultipleChoice""", """SqueezeBertForQuestionAnswering""", """SqueezeBertForSequenceClassification""", """SqueezeBertForTokenClassification""", """SqueezeBertModel""", """SqueezeBertModule""", """SqueezeBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
'''simple docstring''' from __future__ import annotations def __lowercase ( __lowercase , __lowercase = None , __lowercase = None ) -> None: '''simple docstring''' if start is None: _A = 0 if end is None: _A = len(__lowercase ) - 1 if start >= end: return _A = (start + end) // 2 slowsort(__lowercase , __lowercase , __lowercase ) slowsort(__lowercase , mid + 1 , __lowercase ) if sequence[end] < sequence[mid]: _A , _A = sequence[mid], sequence[end] slowsort(__lowercase , __lowercase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
79
0
'''simple docstring''' import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _UpperCamelCase ( ) -> List[Any]: '''simple docstring''' UpperCamelCase__ = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=__A , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=__A , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=__A ) return parser.parse_args() def _UpperCamelCase ( ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ = parse_args() # Import training_script as a module. UpperCamelCase__ = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) UpperCamelCase__ = script_fpath.stem UpperCamelCase__ = importlib.import_module(__A ) # Patch sys.argv UpperCamelCase__ = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
80
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = 
self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that 
outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) 
def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
79
0
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow lowerCamelCase_ : Tuple = False class __A ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self , __A=32 ) -> List[Any]: set_seed(0 ) a =UNetaDModel(sample_size=__A , in_channels=3 , out_channels=3 ) a =torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def SCREAMING_SNAKE_CASE ( self ) -> Any: a ='''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable a =DDPMScheduler( num_train_timesteps=1000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=__A , ) a =DDIMScheduler( num_train_timesteps=1000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=__A , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) a =[torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(__A ) for _ in range(4 )] a =[torch.randn((4, 3, 32, 32) ).to(__A ) for _ in range(4 )] a =[torch.randint(0 , 1000 , (4,) ).long().to(__A ) for _ in range(4 )] # train with a DDPM scheduler a , a =self.get_model_optimizer(resolution=32 ) model.train().to(__A ) for i in range(4 ): optimizer.zero_grad() a =ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) a =model(__A , timesteps[i] ).sample a =torch.nn.functional.mse_loss(__A , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM a , a =self.get_model_optimizer(resolution=32 ) model.train().to(__A ) for i in range(4 ): optimizer.zero_grad() a =ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) a =model(__A , timesteps[i] ).sample a =torch.nn.functional.mse_loss(__A , noise[i] ) loss.backward() optimizer.step() del model, 
optimizer self.assertTrue(torch.allclose(__A , __A , atol=1E-5 ) ) self.assertTrue(torch.allclose(__A , __A , atol=1E-5 ) )
81
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = 
random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { 
"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, 
self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, 
TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : 
List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
_A = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = tf.constant([[4, 10]] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _A = emba(input_ids.shape ) _A = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _A = emba.weight[:3, :5] tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _A = embed_positions([2, 16, 768] )[None, None, :, :] _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, 
-2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _A = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
79
0
"""Deprecated helpers for downloading and parsing the MNIST dataset.

Restores the canonical tensorflow/contrib/learn mnist module: the dumped
version had mangled, mutually-inconsistent names (every function was named
`_UpperCAmelCase`, duplicate parameter names, `numpy.uintaa`, bare
`import urllib` without the `request` submodule), which made it unimportable.
"""

import collections
import gzip
import os
import urllib.request  # `import urllib` alone does not guarantee urllib.request is loaded

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated

_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read one big-endian unsigned 32-bit integer from *bytestream*."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract images from a gzipped IDX3 file into a uint8 array.

    Args:
        f: an open file object for a `*-images-idx3-ubyte.gz` file.

    Returns:
        A numpy array of shape [num_images, rows, cols, 1].

    Raises:
        ValueError: if the magic number is not 2051 (the IDX3 signature).
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalar indices to one-hot row vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Set the single hot entry for every row in one vectorized assignment.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract labels from a gzipped IDX1 file into a 1-D uint8 array.

    Raises:
        ValueError: if the magic number is not 2049 (the IDX1 signature).
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container for one split of MNIST with epoch-aware mini-batching."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be uint8 (leave pixels as [0, 255]) or float32
        (rescale into [0.0, 1.0]). `seed` controls the shuffling order.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        """The images of this split."""
        return self._images

    @property
    def labels(self):
        """The labels of this split."""
        return self._labels

    @property
    def num_examples(self):
        """Number of examples in this split."""
        return self._num_examples

    @property
    def epochs_completed(self):
        """How many full passes over the data have been served."""
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download *filename* from *source_url* into *work_directory* unless present.

    Returns the local file path.
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(
    None,
    "Please use alternatives such as:"
    " tensorflow_datasets.load('mnist')",
)
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and parse MNIST into train/validation/test splits."""
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
82
"""GPT-NeoX model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    """Configuration class for a GPT-NeoX model.

    Defaults match EleutherAI/gpt-neox-20b. `rope_scaling`, when given, must
    be a dict with exactly the keys `type` (one of "linear"/"dynamic") and
    `factor` (a float > 1.0).
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        # Each attention head must get an integral slice of the hidden state.
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dict (no-op when unset).

        Raises:
            ValueError: if `rope_scaling` is not a two-field dict with a valid
                `type` and a float `factor` > 1.0.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
79
0
'''simple docstring''' def A__ ( UpperCAmelCase_ = 1 , UpperCAmelCase_ = 1_0_0_0 ): _UpperCamelCase : int = 1 _UpperCamelCase : Union[str, Any] = 0 for divide_by_number in range(UpperCAmelCase_ , digit + 1 ): _UpperCamelCase : list[int] = [] _UpperCamelCase : int = numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(UpperCAmelCase_ ): _UpperCamelCase : Optional[Any] = len(UpperCAmelCase_ ) _UpperCamelCase : List[Any] = divide_by_number else: has_been_divided.append(UpperCAmelCase_ ) _UpperCamelCase : str = now_divide * 1_0 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
83
'''simple docstring''' from PIL import Image def __lowercase ( __lowercase , __lowercase ) -> Image: '''simple docstring''' _A = (259 * (level + 255)) / (255 * (259 - level)) def contrast(__lowercase ) -> int: return int(128 + factor * (c - 128) ) return img.point(__lowercase ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change contrast to 170 lowerCamelCase_ = change_contrast(img, 1_70) cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
79
0
"""simple docstring""" import math def _snake_case ( lowercase__ : int = 1_0_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :int = sum(i * i for i in range(1 , n + 1 ) ) lowerCAmelCase_ :Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(F"""{solution() = }""")
84
'''simple docstring''' def __lowercase ( __lowercase ) -> int: '''simple docstring''' assert isinstance(__lowercase , __lowercase ), F'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: _A = F'''The input value of [n={number}] has to be > 0''' raise ValueError(__lowercase ) else: _A = sylvester(number - 1 ) _A = num - 1 _A = num return lower * upper + 1 if __name__ == "__main__": print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
79
0
'''simple docstring''' from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def UpperCamelCase_( snake_case : Dict , snake_case : Dict ): '''simple docstring''' snake_case_ = k_size // 2 snake_case_ , snake_case_ = mgrid[0 - center : k_size - center, 0 - center : k_size - center] snake_case_ = 1 / (2 * pi * sigma) * exp(-(square(snake_case ) + square(snake_case )) / (2 * square(snake_case )) ) return g def UpperCamelCase_( snake_case : List[str] , snake_case : Any , snake_case : List[Any] ): '''simple docstring''' snake_case_ , snake_case_ = image.shape[0], image.shape[1] # dst image height and width snake_case_ = height - k_size + 1 snake_case_ = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows snake_case_ = zeros((dst_height * dst_width, k_size * k_size) ) snake_case_ = 0 for i, j in product(range(snake_case ) , range(snake_case ) ): snake_case_ = ravel(image[i : i + k_size, j : j + k_size] ) snake_case_ = window row += 1 # turn the kernel into shape(k*k, 1) snake_case_ = gen_gaussian_kernel(snake_case , snake_case ) snake_case_ = ravel(snake_case ) # reshape and get the dst image snake_case_ = dot(snake_case , snake_case ).reshape(snake_case , snake_case ).astype(snake_case ) return dst if __name__ == "__main__": # read original image _SCREAMING_SNAKE_CASE : Union[str, Any] = imread(r"../image_data/lena.jpg") # turn image in gray scale value _SCREAMING_SNAKE_CASE : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _SCREAMING_SNAKE_CASE : Optional[Any] = gaussian_filter(gray, 3, sigma=1) _SCREAMING_SNAKE_CASE : Any = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("gaussian filter with 3x3 mask", gaussianaxa) imshow("gaussian filter with 5x5 mask", gaussianaxa) waitKey()
85
"""Prune a GPT-2 model's attention heads by importance score.

Restores the canonical names (every function was mangled to `__lowercase`
with duplicate parameter names, while call sites used the real names such as
`compute_heads_importance`), making the script runnable again.
"""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save *model* into *dirpath*, replacing any stale checkpoint files."""
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis.

    When `unlogit` is True, squares `p` first (the attention maps passed in
    are treated this way by compute_heads_importance).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) := 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor as a layer-by-head table."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores.

    Importance follows Michel et al. (http://arxiv.org/abs/1905.10650): the
    absolute gradient of the loss w.r.t. a differentiable head mask.

    Returns:
        (attn_entropy, head_importance, total_loss)
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least-important heads until the LM score drops
    below `masking_threshold` * original score. Returns the final head mask.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the weights of the masked heads and compare score/speed
    against masking-only, then save the pruned model.
    """
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Parse CLI arguments, load the model/data, then score, mask and prune heads."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
0
"""simple docstring""" import enum import shutil import sys lowerCamelCase__ , lowerCamelCase__ = shutil.get_terminal_size() lowerCamelCase__ = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""} class A__ ( enum.Enum): A_ : Union[str, Any] = 0 A_ : List[Any] = 1 def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase="" ): sys.stdout.write(str(_UpperCamelCase ) + end ) sys.stdout.flush() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase="" ): forceWrite(F"\u001b[{color}m{content}\u001b[0m" , _UpperCamelCase ) def __lowerCAmelCase (): forceWrite('\r' ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): forceWrite(F"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" ) def __lowerCAmelCase (): forceWrite(' ' * TERMINAL_WIDTH ) reset_cursor() def __lowerCAmelCase (): reset_cursor() forceWrite('-' * TERMINAL_WIDTH )
86
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = CycleDiffusionPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) 
torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A = CLIPTextModel(__UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ): '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _A = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = 
self.get_dummy_components() for name, module in components.items(): if hasattr(__UpperCAmelCase , "half" ): _A = module.half() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Any ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained( 
__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
79
0
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case_ ( __A ): def __init__( self : Optional[Any] , lowercase_ : AutoencoderKL , lowercase_ : CLIPTextModel , lowercase_ : CLIPTokenizer , lowercase_ : UNetaDConditionModel , lowercase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowercase_ : StableDiffusionSafetyChecker , lowercase_ : CLIPImageProcessor , ) -> Optional[int]: super().__init__() self.register_modules( vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , ) def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Union[str, int]] = "auto" ) -> Optional[int]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowercase__ : str = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase_ ) def __UpperCamelCase ( self : Optional[int] ) -> Tuple: self.enable_attention_slicing(lowercase_ ) @torch.no_grad() def __call__( self : int , lowercase_ : Union[str, List[str]] , lowercase_ : int = 5_12 , lowercase_ : int = 5_12 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : 
Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , lowercase_ : Optional[torch.FloatTensor] = None , **lowercase_ : int , ) -> List[Any]: if isinstance(lowercase_ , lowercase_ ): lowercase__ : Optional[int] = 1 elif isinstance(lowercase_ , lowercase_ ): lowercase__ : List[Any] = len(lowercase_ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase_ )}.''' ) # get prompt text embeddings lowercase__ : List[str] = self.tokenizer( lowercase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) lowercase__ : str = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowercase__ : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) lowercase__ : str = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: lowercase__ : str = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method lowercase__ , lowercase__ , lowercase__ : List[Any] = text_embeddings.shape lowercase__ : Union[str, Any] = text_embeddings.repeat(1 , lowercase_ , 1 ) lowercase__ : Dict = text_embeddings.view(bs_embed * 
num_images_per_prompt , lowercase_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowercase__ : Tuple = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowercase__ : List[str] if negative_prompt is None: lowercase__ : int = [""] elif type(lowercase_ ) is not type(lowercase_ ): raise TypeError( F'''`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_ )} !=''' F''' {type(lowercase_ )}.''' ) elif isinstance(lowercase_ , lowercase_ ): lowercase__ : str = [negative_prompt] elif batch_size != len(lowercase_ ): raise ValueError( F'''`negative_prompt`: {negative_prompt} has batch size {len(lowercase_ )}, but `prompt`:''' F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`." ) else: lowercase__ : Union[str, Any] = negative_prompt lowercase__ : List[Any] = text_input_ids.shape[-1] lowercase__ : Any = self.tokenizer( lowercase_ , padding="max_length" , max_length=lowercase_ , truncation=lowercase_ , return_tensors="pt" , ) lowercase__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowercase__ : Union[str, Any] = uncond_embeddings.shape[1] lowercase__ : str = uncond_embeddings.repeat(lowercase_ , lowercase_ , 1 ) lowercase__ : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase__ : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. lowercase__ : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) lowercase__ : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) lowercase__ : Union[str, Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps lowercase__ : Tuple = torch.randn( lowercase_ , generator=lowercase_ , device="cpu" , dtype=lowercase_ ).to(self.device ) lowercase__ : Union[str, Any] = torch.randn(lowercase_ , generator=lowercase_ , device="cpu" , dtype=lowercase_ ).to( self.device ) else: lowercase__ : Any = torch.randn( lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) lowercase__ : Tuple = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) else: if latents_reference.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) lowercase__ : Dict = latents_reference.to(self.device ) lowercase__ : str = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images lowercase__ : Union[str, Any] = (latents_shape[3] - latents_shape_reference[3]) // 2 lowercase__ : str = (latents_shape[2] - latents_shape_reference[2]) // 2 lowercase__ : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx lowercase__ : int = 
latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy lowercase__ : Any = 0 if dx < 0 else dx lowercase__ : Optional[Any] = 0 if dy < 0 else dy lowercase__ : List[Any] = max(-dx , 0 ) lowercase__ : str = max(-dy , 0 ) # import pdb # pdb.set_trace() lowercase__ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(lowercase_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand lowercase__ : int = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowercase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowercase__ : Tuple = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowercase__ : int = {} if accepts_eta: lowercase__ : List[Any] = eta for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance lowercase__ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase__ : Any = self.scheduler.scale_model_input(lowercase_ , lowercase_ ) # predict the noise residual lowercase__ : Optional[int] = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample # perform guidance if do_classifier_free_guidance: lowercase__ , lowercase__ : List[str] = noise_pred.chunk(2 ) lowercase__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 lowercase__ : Optional[int] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample 
# call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ , lowercase_ ) lowercase__ : int = 1 / 0.1_82_15 * latents lowercase__ : Dict = self.vae.decode(lowercase_ ).sample lowercase__ : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowercase__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: lowercase__ : List[str] = self.feature_extractor(self.numpy_to_pil(lowercase_ ) , return_tensors="pt" ).to( self.device ) lowercase__ , lowercase__ : int = self.safety_checker( images=lowercase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: lowercase__ : List[str] = None if output_type == "pil": lowercase__ : List[str] = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_ )
87
"""Lazy-import structure for the Longformer model family.

At static-analysis time (``TYPE_CHECKING``) the real symbols are imported so
IDEs and type checkers can see them; at runtime the module is replaced by a
``_LazyModule`` proxy that imports each submodule only on first attribute
access, keeping heavy optional dependencies (torch / TF / tokenizers) lazy.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps each submodule name to the public names it exports; consumed by
# _LazyModule below.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

# The fast tokenizer requires the optional `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

# PyTorch modelling code is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

# TensorFlow modelling code is only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module object with the lazy proxy; attribute access on the
    # package then triggers the real submodule import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
0
def counting_sort(collection):
    """Stable counting sort over a collection of integers.

    Counts the occurrences of every value, converts the counts into prefix
    sums (so ``counting_arr[i]`` is the number of elements ``<= i + coll_min``),
    then places elements from the end of the input so equal keys keep their
    original relative order (stability).  Negative values are supported
    because indices are offset by the minimum.

    :param collection: list of integers to sort (not modified).
    :return: a new list with the same elements in ascending order.

    >>> counting_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if collection == []:
        return []

    # Gather the information needed to size the counting array.
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # One counter slot per distinct possible value in [coll_min, coll_max].
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # Count how many times each number appears in the collection.
    for number in collection:
        counting_arr[number - coll_min] += 1

    # Prefix sums: counting_arr[i] now tells how many elements are <= i.
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # Place elements from end to beginning, decrementing the count so
    # duplicates land in consecutive slots in their original order.
    ordered = [0] * coll_len
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of ``string`` by counting-sorting their code points.

    >>> counting_sort_string("thisisthestring")
    'eghhiiinrsssttt'
    """
    return "".join(chr(i) for i in counting_sort([ord(c) for c in string]))


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
88
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class _UpperCAmelCase : """simple docstring""" snake_case = '''dummy_data''' snake_case = '''datasets''' snake_case = False def __init__( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Union[Version, str] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[List[Callable]] = None , ): '''simple docstring''' _A = 0 _A = dataset_name _A = cache_dir _A = use_local_dummy_data _A = config # download_callbacks take a single url as input _A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _A = str(__UpperCAmelCase ) # to be downloaded _A = None _A = None @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self._dummy_file is None: _A = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else 
self.github_path_to_dummy_data ) _A = cached_path( __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase ) return os.path.join(__UpperCAmelCase , self.dummy_file_name ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' if self._bucket_url is None: _A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def lowerCAmelCase ( self : str ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , *__UpperCAmelCase : Dict ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _A = self.dummy_file_name # special case when data_url is a dict if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase ) else: return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : Any ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase 
: List[str] , **__UpperCAmelCase : List[str] ): '''simple docstring''' return path def lowerCAmelCase ( self : str ): '''simple docstring''' return {} def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for single_url in single_urls: download_callback(__UpperCAmelCase ) else: _A = single_urls download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls] else: _A = single_urls _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) _A = value # make sure that values are unique if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase ) ) for url in data_url ) _A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _A = [data_url[0]] * len(__UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase 
) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__UpperCAmelCase ) return dummy_data_list def lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. 
return path_to_dummy_data def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' def _iter_archive_members(__UpperCAmelCase : List[Any] ): # this preserves the order of the members inside the ZIP archive _A = Path(self.dummy_file ).parent _A = path.relative_to(__UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__UpperCAmelCase ) _A = Path(__UpperCAmelCase ) _A = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("rb" ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [paths] for path in paths: if os.path.isfile(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__UpperCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
79
0
"""Fixture module used by test_patching.py to exercise patch_submodule().

It deliberately imports the same objects under several styles (plain import,
renamed import, from-import, renamed from-import) so the patching helper can
be verified against every import form, and it binds one builtin at module
level for the builtin-patching case.
"""
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests __lowerCAmelCase = open # noqa: we just need to have a builtin inside this module to test it properly
89
"""Jaccard similarity coefficient between two collections."""


def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity ``|A ∩ B| / |A ∪ B|`` of two collections.

    :param set_a: first collection; a ``set``, or a ``list``/``tuple``.
    :param set_b: second collection; must be the same family as ``set_a``
        (both sets, or both lists/tuples).  For lists, the intersection and
        union preserve duplicates and the order of ``set_a``.
    :param alternative_union: when True, use ``len(set_a) + len(set_b)`` as
        the denominator instead of the size of the true union.
    :return: the similarity as a float, or ``None`` when the argument types
        are an unsupported combination.

    >>> jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"})
    0.375
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        # Union preserving set_a's order, followed by the extras from set_b.
        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)

    # Unsupported combination of argument types (e.g. set vs. list).
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
79
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model", } } __A = { "albert-base-v1": 5_12, "albert-large-v1": 5_12, "albert-xlarge-v1": 5_12, "albert-xxlarge-v1": 5_12, "albert-base-v2": 5_12, "albert-large-v2": 5_12, "albert-xlarge-v2": 5_12, "albert-xxlarge-v2": 5_12, } __A = "▁" class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__="[CLS]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<unk>" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<pad>" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
__lowerCamelCase = ( AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ , normalized=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token ) __lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , ) __lowerCamelCase = do_lower_case __lowerCamelCase = remove_space __lowerCamelCase = keep_accents __lowerCamelCase = vocab_file __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase__ ) @property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return len(self.sp_model ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> int: '''simple docstring''' __lowerCamelCase = self.__dict__.copy() __lowerCamelCase = None return state def __setstate__( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' __lowerCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowerCamelCase = {} __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' if self.remove_space: __lowerCamelCase = ' '.join(inputs.strip().split() ) else: __lowerCamelCase = inputs __lowerCamelCase = outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: __lowerCamelCase = unicodedata.normalize('NFKD' , lowerCamelCase__ ) 
__lowerCamelCase = ''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase__ )] ) if self.do_lower_case: __lowerCamelCase = outputs.lower() return outputs def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.preprocess_text(lowerCamelCase__ ) __lowerCamelCase = self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ ) __lowerCamelCase = [] for piece in pieces: if len(lowerCamelCase__ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): __lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase__ , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __lowerCamelCase = cur_pieces[1:] else: __lowerCamelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(lowerCamelCase__ ) else: new_pieces.append(lowerCamelCase__ ) return new_pieces def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' return self.sp_model.PieceToId(lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' return self.sp_model.IdToPiece(lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = '' __lowerCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase__ ) + token __lowerCamelCase = True __lowerCamelCase = [] else: current_sub_tokens.append(lowerCamelCase__ ) __lowerCamelCase = False out_string += self.sp_model.decode(lowerCamelCase__ ) return out_string.strip() def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return cls + 
token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is not None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1] return [1] + ([0] * len(lowerCamelCase__ )) + [1] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCamelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCamelCase = os.path.join( lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase__ , 'wb' ) as fi: __lowerCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,)
90
'''Tests for Accelerate's KwargsHandler plumbing (GradScaler / DDP kwargs).

NOTE(review): this record is name-mangled — every assignment target was
rewritten to ``_A`` / ``lowerCamelCase_`` while later statements still read
the original names (``scaler_handler``, ``ddp_scaler``, ``accelerator``,
``model``, ``error_msg``, ``observed_bucket_cap_map``), and both the
dataclass and the test case were renamed to the same ``_UpperCAmelCase``.
The original names must be restored before this module can run.
'''

import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class _UpperCAmelCase ( snake_case_ ):  # NOTE(review): base was presumably KwargsHandler — mangled
    """Minimal KwargsHandler with three fields used to test ``to_kwargs``."""

    # NOTE(review): all three fields share the mangled name ``snake_case``;
    # from the assertions below they were originally ``a``, ``b`` and ``c``.
    snake_case = 0
    snake_case = False
    snake_case = 3.0


class _UpperCAmelCase ( unittest.TestCase ):
    """Exercises kwargs handlers on CPU, CUDA, and multi-GPU setups."""

    def lowerCAmelCase ( self : Union[str, Any] ):
        '''``to_kwargs`` must return only the fields that differ from their defaults.'''
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
        self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {"a": 2, "b": True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )

    @require_cuda
    def lowerCAmelCase ( self : int ):
        '''GradScalerKwargs values must reach the fp16 GradScaler; others keep defaults.'''
        _A = GradScalerKwargs(init_scale=1024 , growth_factor=2 )  # NOTE(review): was ``scaler_handler = ...``
        AcceleratorState._reset_state()
        _A = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )  # NOTE(review): was ``accelerator = ...``
        print(accelerator.use_fpaa )
        _A = accelerator.scaler  # NOTE(review): was ``scaler = ...``
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , __UpperCAmelCase )

    @require_multi_gpu
    def lowerCAmelCase ( self : List[Any] ):
        '''Re-launches this file under torchrun so the __main__ block runs on every GPU.'''
        _A = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]  # NOTE(review): was ``cmd = ...``
        execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )


if __name__ == "__main__":
    # Executed under torchrun by the multi-GPU test above: checks that
    # DistributedDataParallelKwargs values reach the wrapped DDP model.
    lowerCamelCase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)  # NOTE(review): was ``ddp_scaler = ...``
    lowerCamelCase_ = Accelerator(kwargs_handlers=[ddp_scaler])  # NOTE(review): was ``accelerator = ...``
    lowerCamelCase_ = torch.nn.Linear(1_00, 2_00)  # NOTE(review): was ``model = ...``
    lowerCamelCase_ = accelerator.prepare(model)  # NOTE(review): was ``model = ...``
    # Check the values changed in kwargs
    lowerCamelCase_ = ''''''  # NOTE(review): was ``error_msg = ""``
    lowerCamelCase_ = model.bucket_bytes_cap // (10_24 * 10_24)  # NOTE(review): was ``observed_bucket_cap_map = ...``
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
79
0
"""A dense polynomial over the reals, stored as a coefficient list.

NOTE(review): this record is name-mangled — the class was renamed to
``lowerCAmelCase__`` yet every method constructs/annotates ``Polynomial``;
assignment targets were rewritten to ``SCREAMING_SNAKE_CASE_`` while
subsequent statements read the original names (``coefficients``,
``result``, ``polynomial``); and several signatures carry the duplicate
parameter name ``lowercase_``. The original identifiers must be restored
before this class can run.
"""

from __future__ import annotations

from collections.abc import MutableSequence


class lowerCAmelCase__ :
    '''Polynomial of fixed degree; coefficients[i] multiplies x**i.'''

    def __init__( self : List[str] , lowercase_ : int , lowercase_ : MutableSequence[float]):
        '''Store ``degree`` and a defensive copy of the ``degree + 1`` coefficients.'''
        # NOTE(review): duplicate parameter name — originally (degree, coefficients).
        if len(lowercase_) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''')
        SCREAMING_SNAKE_CASE_ : list[float] = list(lowercase_)   # NOTE(review): was ``self.coefficients = ...``
        SCREAMING_SNAKE_CASE_ : str = degree                      # NOTE(review): was ``self.degree = ...``

    def __add__( self : int , lowercase_ : Polynomial):
        '''Coefficient-wise sum; result degree is the larger of the two operands.'''
        if self.degree > polynomial_a.degree:
            SCREAMING_SNAKE_CASE_ : List[str] = self.coefficients[:]   # NOTE(review): was ``coefficients = ...``
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , lowercase_)
        else:
            SCREAMING_SNAKE_CASE_ : int = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , lowercase_)

    def __sub__( self : str , lowercase_ : Polynomial):
        '''Subtraction implemented as addition with the negated right operand.'''
        return self + polynomial_a * Polynomial(0 , [-1])

    def __neg__( self : str):
        '''Unary minus: negate every coefficient.'''
        return Polynomial(self.degree , [-c for c in self.coefficients])

    def __mul__( self : List[Any] , lowercase_ : Polynomial):
        '''Schoolbook O(n*m) convolution of the two coefficient lists.'''
        SCREAMING_SNAKE_CASE_ : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , lowercase_)

    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int | float):
        '''Evaluate the polynomial at the given substitution value (Horner not used).'''
        SCREAMING_SNAKE_CASE_ : int | float = 0   # NOTE(review): was ``result = 0``
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__( self : List[str]):
        '''Render highest degree first, e.g. ``2x^2 + 1x - 3``; zero terms skipped.'''
        SCREAMING_SNAKE_CASE_ : List[Any] = ''''''   # NOTE(review): was ``polynomial = ""``
        for i in range(self.degree , -1 , -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                # Separator only between terms, never before the first one.
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(lowercase_)
        return polynomial

    def __repr__( self : Tuple):
        '''Same rendering as ``str`` — convenient for the REPL.'''
        return self.__str__()

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        '''Return the derivative: d/dx sum(c_i x^i) = sum(i * c_i x^(i-1)).'''
        SCREAMING_SNAKE_CASE_ : list[float] = [0] * self.degree
        for i in range(self.degree):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.coefficients[i + 1] * (i + 1)   # NOTE(review): target was ``coefficients[i]``
        return Polynomial(self.degree - 1 , lowercase_)

    def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int | float = 0):
        '''Return the antiderivative, with the given integration constant as term 0.'''
        SCREAMING_SNAKE_CASE_ : list[float] = [0] * (self.degree + 2)
        SCREAMING_SNAKE_CASE_ : Any = constant                     # NOTE(review): target was ``coefficients[0]``
        for i in range(self.degree + 1):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.coefficients[i] / (i + 1)   # NOTE(review): target was ``coefficients[i + 1]``
        return Polynomial(self.degree + 1 , lowercase_)

    def __eq__( self : str , lowercase_ : object):
        '''Equal iff the other object is a Polynomial with identical degree and coefficients.'''
        # NOTE(review): was ``isinstance(polynomial_2, Polynomial)`` — mangled to self-comparison.
        if not isinstance(lowercase_ , lowercase_):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__( self : Union[str, Any] , lowercase_ : object):
        '''Defined as the negation of ``__eq__``.'''
        return not self.__eq__(lowercase_)
91
def solution(n: int = 100) -> int:
    """Project Euler problem 6: for the integers 1..n, return the difference
    between the square of their sum and the sum of their squares.

    Uses the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6,
    with floor division (both expressions are always exact integers), so no
    float rounding is involved.

    Args:
        n: upper bound of the range, inclusive. Defaults to 100 as in the
            original Project Euler statement.

    Returns:
        (1 + 2 + ... + n)^2 - (1^2 + 2^2 + ... + n^2).

    Note: the original record was name-mangled (function ``__lowercase``
    reading an unbound ``n`` and returning unbound intermediates, while the
    ``__main__`` block called an undefined ``solution``); restored here.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
79
0
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class a__ ( snake_case__ ): _a : Optional[int] = """""" _a : str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _a : str = None # compression type in fsspec. ex: "gzip" _a : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self , _A = "" , _A = None , _A = None , **_A ): """simple docstring""" super().__init__(self , **_A ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode __lowerCAmelCase = fsspec.open( _A , mode="rb" , protocol=_A , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) __lowerCAmelCase = os.path.basename(self.file.path.split("::" )[0] ) __lowerCAmelCase = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." 
in self.compressed_name else self.compressed_name ) __lowerCAmelCase = None @classmethod def __SCREAMING_SNAKE_CASE( cls , _A ): """simple docstring""" return super()._strip_protocol(_A ).lstrip("/" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.dir_cache is None: __lowerCAmelCase = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} __lowerCAmelCase = {f["name"]: f} def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.file.open().read() def __SCREAMING_SNAKE_CASE( self , _A , _A = "rb" , _A=None , _A=True , _A=None , **_A , ): """simple docstring""" __lowerCAmelCase = self._strip_protocol(_A ) if mode != "rb": raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" ) return self.file.open() class a__ ( snake_case__ ): _a : Any = """bz2""" _a : Dict = """bz2""" _a : Union[str, Any] = """.bz2""" class a__ ( snake_case__ ): _a : int = """gzip""" _a : List[Any] = """gzip""" _a : Optional[int] = """.gz""" class a__ ( snake_case__ ): _a : Optional[int] = """lz4""" _a : Any = """lz4""" _a : Tuple = """.lz4""" class a__ ( snake_case__ ): _a : Dict = """xz""" _a : List[Any] = """xz""" _a : Optional[Any] = """.xz""" class a__ ( snake_case__ ): _a : Union[str, Any] = """zstd""" _a : int = """zstd""" _a : int = """.zst""" def __init__( self , _A , _A = "rb" , _A = None , _A = None , _A = DEFAULT_BLOCK_SIZE , **_A , ): """simple docstring""" super().__init__( fo=_A , mode=_A , target_protocol=_A , target_options=_A , block_size=_A , **_A , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 __lowerCAmelCase = self.file.__enter__ class a__ : def 
__init__( self , _A ): """simple docstring""" __lowerCAmelCase = file_ def __enter__( self ): """simple docstring""" self._file.__enter__() return self def __exit__( self , *_A , **_A ): """simple docstring""" self._file.__exit__(*_A , **_A ) def __iter__( self ): """simple docstring""" return iter(self._file ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return next(self._file ) def __getattr__( self , _A ): """simple docstring""" return getattr(self._file , _A ) def fixed_enter(*_A , **_A ): return WrappedFile(_enter(*_A , **_A ) ) __lowerCAmelCase = fixed_enter
92
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. 
If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. 
_A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in 
original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() 
trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
79
0
"""SentencePiece tokenizer for the google "bert_for_seq_generation" checkpoint."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class lowerCAmelCase__(PreTrainedTokenizer):
    """SentencePiece-based tokenizer.

    Wraps a `sentencepiece` model file (``spiece.model``) and implements the
    `PreTrainedTokenizer` hooks (tokenize / token<->id conversion / saving).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # No prefix tokens are prepended by this tokenizer.
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Keep the kwargs so the processor can be re-created in __setstate__.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        """Size of the underlying SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """Return token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is a C++ object and cannot be pickled;
        # drop it here and rebuild it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into sentencepiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the sentencepiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the sentencepiece vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of tokens into a single decoded string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the sentencepiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file to copy (e.g. loaded from serialized proto) — write it out.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
93
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''canine''' def __init__( self : Dict , __UpperCAmelCase : List[str]=768 , __UpperCAmelCase : str=12 , __UpperCAmelCase : Union[str, Any]=12 , __UpperCAmelCase : int=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : List[Any]=16384 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : Dict=1E-12 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : int=0xE000 , __UpperCAmelCase : List[Any]=0xE001 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : int=16384 , __UpperCAmelCase : Union[str, Any]=128 , **__UpperCAmelCase : Dict , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _A = max_position_embeddings _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = type_vocab_size _A = layer_norm_eps # Character config: _A = downsampling_rate _A = upsampling_kernel_size _A = num_hash_functions _A = num_hash_buckets _A = local_transformer_stride
79
0
"""Unit tests for the InstructBLIP processor (tokenizer + image processor + Q-Former tokenizer)."""
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPTaTokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class _snake_case(unittest.TestCase):
    def setUp(self):
        # Build a processor from tiny test checkpoints and save it so the
        # get_* helpers below can reload its components from disk.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-last, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        # Q-Former encodings are exposed with a "qformer_" prefix on the keys.
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
94
'''simple docstring''' class _UpperCAmelCase : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : list[int] ): '''simple docstring''' _A = len(__UpperCAmelCase ) _A = [0] * len_array if len_array > 0: _A = array[0] for i in range(1 , __UpperCAmelCase ): _A = self.prefix_sum[i - 1] + array[i] def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ): '''simple docstring''' _A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCAmelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
79
0
from math import ceil, sqrt def _A ( SCREAMING_SNAKE_CASE : int = 1_000_000 ): """simple docstring""" a__ : int =0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: a__ : Any =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: a__ : List[str] =1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(F"""{solution() = }""")
95
'''simple docstring''' from typing import List import numpy as np def __lowercase ( __lowercase ) -> int: '''simple docstring''' _A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) _A = max(lists_lengths.values() , default=0 ) return max(1 , __lowercase ) def __lowercase ( __lowercase , __lowercase ) -> List[range]: '''simple docstring''' _A = [] for group_idx in range(__lowercase ): _A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _A = range(__lowercase , start + num_shards_to_add ) shards_indices_per_group.append(__lowercase ) return shards_indices_per_group def __lowercase ( __lowercase , __lowercase ) -> List[dict]: '''simple docstring''' _A = _number_of_shards_in_gen_kwargs(__lowercase ) if num_shards == 1: return [dict(__lowercase )] else: _A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__lowercase , __lowercase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__lowercase ) ) ] def __lowercase ( __lowercase ) -> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __lowercase ) else 
gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __lowercase ( __lowercase , __lowercase ) -> dict: '''simple docstring''' _A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )} _A = {} for size in list_sizes: _A = list(range(__lowercase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _A = dict(__lowercase ) for key, value in shuffled_kwargs.items(): if isinstance(__lowercase , __lowercase ): _A = [value[i] for i in indices_per_size[len(__lowercase )]] return shuffled_kwargs
79
0
"""Check that a TensorFlow SavedModel only uses ops convertible to a given ONNX opset."""
import argparse
import json
import os

# NOTE: the protobuf module is `saved_model_pb2` (generated by protoc).
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    """Compare the ops used by `saved_model_path` against the ONNX op list.

    Args:
        saved_model_path: path to the SavedModel `.pb` file.
        strict: raise instead of printing when incompatible ops are found.
        opset: highest ONNX opset whose ops are considered supported.

    Raises:
        Exception: in strict mode, when incompatible ops are present.
    """
    saved_model = SavedModel()
    onnx_ops = []

    # Collect every op supported up to (and including) the requested opset.
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = [op for op in model_op_names if op not in onnx_ops and op not in INTERNAL_OPS]

    if strict and len(incompatible_ops) > 0:
        # Join the list into the message: `str + list` would raise TypeError.
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
96
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
0
"""Image processor for Pix2Struct: renders optional header text and extracts flattened patches."""
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    """Raise if torch is installed but older than 1.11 (needed for `antialias`)."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a CHW tensor.

    Returns a tensor of shape (1, rows, columns, patch_height * patch_width * channels).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> "Image.Image":
    """Render `text` onto a new PIL image, wrapped at 80 characters."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # Fall back to a default font hosted on the Hub.
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs):
    """Render `header` as text and stack it above `image`, resizing both to a common width."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image


class lowercase(BaseImageProcessor):
    """Pix2Struct image processor.

    Normalizes images per-image, resizes so a whole number of patches fits the
    `max_patches` budget, and outputs flattened patches with row/column ids
    prepended, plus an attention mask over the non-padded patches.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize `image` to fit `max_patches` patches and return flattened patches.

        Each output row is [row_id, col_id, patch pixels]; rows beyond the
        actual patch count are zero padding up to `max_patches`.
        """
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image holds at most `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Per-image standardization: subtract the mean and divide by the (clamped) std."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        # Clamp std to avoid division blow-up on near-constant images
        # (matches tf.image.per_image_standardization).
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Preprocess one or more images into flattened patches + attention mask."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy (1.0 where the patch row is non-zero)
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
97
"""VQ-Diffusion text-to-image pipeline.

Restored from an obfuscated dump: the original bound every intermediate to the
single name ``_A`` and gave every parameter the same name ``__UpperCAmelCase``
(a duplicate-argument SyntaxError), while later statements still referenced the
real identifiers (``prompt_embeds``, ``batch_size``, ...).  Names are restored
so the module parses and the dataflow is consistent; the garbled import
``TransformeraDModel`` is corrected to ``Transformer2DModel``.
"""
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Container for the (optionally learnable) unconditional text embeddings
    used for classifier-free guidance."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion.

    Components (registered via `register_modules`):
        vqvae: VQ-VAE used to decode latent codebook indices to images.
        text_encoder / tokenizer: frozen CLIP text tower.
        transformer: conditional Transformer2DModel denoising latent indices.
        scheduler: VQDiffusionScheduler driving the reverse process.
        learned_classifier_free_sampling_embeddings: unconditional embeddings.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Encode `prompt` with CLIP; optionally prepend unconditional embeddings
        for classifier-free guidance. Returns a (2*)batch x seq x dim tensor."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full generation loop and return decoded images.

        Raises:
            ValueError: if `prompt` is not a str/list, `callback_steps` is not a
                positive int, or user-supplied `latents` have a bad shape/range.
        """
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # the last codebook index is the special "masked" class
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(
                latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t
            ).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # renormalise in log space after the guidance combination
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to log(0) = -inf) the lowest-probability classes whose
        cumulative probability exceeds `truncation_rate`, per latent pixel."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # undo the sort so the mask lines up with the original class ordering
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
79
0
"""simple docstring""" import functools def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = len(lowerCamelCase ) UpperCAmelCase__ = len(lowerCamelCase ) @functools.cache def min_distance(lowerCamelCase , lowerCamelCase ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase ) , 1 + min_distance(lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
98
"""Convert a timm ViT-hybrid (BiT backbone) checkpoint to the HF format.

Restored from an obfuscated dump: every function was named ``__lowercase``
with duplicate parameter names (a SyntaxError) while call sites used the real
names (``create_rename_keys``, ``rename_key``, ...).  Real names reinstated.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_name, hf_name) pairs mapping timm parameters to HF ones."""
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))
    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate q/k/v HF entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the timm classification head (in place)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy a timm ViT-hybrid checkpoint into HF ViTHybrid* classes, verify the
    outputs match, and optionally save / push the result."""
    # define default ViT hybrid configuration (R50 + ViT-Base, 384px)
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
79
0
"""Constants shared across the Accelerate library.

Restored from an obfuscated dump in which every constant was bound to the same
name ``lowercase`` (so all but the last value were lost).  Names follow the
upstream ``accelerate.utils.constants`` module.
"""
import operator as op

# Checkpoint file names used when saving / restoring training state.
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"

# Minimum versions / runtimes for SageMaker model-parallel support.
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]

# Valid FSDP option strings (mirror torch.distributed.fsdp enums).
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"

DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

# Map comparison-operator strings to their callable equivalents.
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# NOTE(review): the dump only shows this literal, not its name; upstream
# accelerate derives a similar list from CUDA_DISTRIBUTED_TYPES — confirm the
# intended identifier against the version this file was taken from.
TORCH_DISTRIBUTED_OPERATION_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
99
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ = { "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", "Swinv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
100
"""COMET machine-translation evaluation metric for the ``datasets`` library.

Restored from an obfuscated dump: the three ``datasets.Metric`` hooks were all
named ``lowerCAmelCase`` (each overwriting the previous), the loaded scorer was
assigned to the throwaway name ``_A`` instead of ``self.scorer``, and
``_compute`` had duplicate parameter names (a SyntaxError).  The standard
``_info`` / ``_download_and_prepare`` / ``_compute`` hooks are reinstated.
"""
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
  author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
  title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation},
  month     = {November},
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
  title     = "{COMET}: A Neural Framework for {MT} Evaluation",
  author    = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
  booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
  month     = nov,
  year      = "2020",
  address   = "Online",
  publisher = "Association for Computational Linguistics",
  url       = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
  pages     = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        """Declare the metric's features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Download the requested COMET checkpoint and keep it on `self.scorer`."""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score (source, hypothesis, reference) triples with the COMET model."""
        if gpus is None:
            # use one GPU when available, otherwise run on CPU
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
79
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase__ :str = logging.get_logger(__name__) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = '''huggingface/label-files''' lowercase = '''imagenet-1k-id2label.json''' lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) ) lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} lowercase = {v: k for k, v in idalabel.items()} lowercase = '''std_conv''' if '''bit''' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowercase = BitConfig( conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , ) return config def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' if "stem.conv" in name: lowercase = name.replace('''stem.conv''' , '''bit.embedder.convolution''' ) if "blocks" in name: lowercase = name.replace('''blocks''' , '''layers''' ) if "head.fc" in name: lowercase = name.replace('''head.fc''' , '''classifier.1''' ) if name.startswith('''norm''' ): lowercase = '''bit.''' + name if "bit" not in name and "classifier" not in name: lowercase = '''bit.encoder.''' + name return name def UpperCamelCase ( ): '''simple docstring''' lowercase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase = Image.open(requests.get(lowerCAmelCase__ , 
stream=lowerCAmelCase__ ).raw ) return im @torch.no_grad() def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ): '''simple docstring''' lowercase = get_config(lowerCAmelCase__ ) # load original model from timm lowercase = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ ) timm_model.eval() # load state_dict of original model lowercase = timm_model.state_dict() for key in state_dict.copy().keys(): lowercase = state_dict.pop(lowerCAmelCase__ ) lowercase = val.squeeze() if '''head''' in key else val # load HuggingFace model lowercase = BitForImageClassification(lowerCAmelCase__ ) model.eval() model.load_state_dict(lowerCAmelCase__ ) # create image processor lowercase = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) ) lowercase = transform.transforms lowercase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowercase = BitImageProcessor( do_resize=lowerCAmelCase__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase = prepare_img() lowercase = transform(lowerCAmelCase__ ).unsqueeze(0 ) lowercase = processor(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) # verify logits with torch.no_grad(): lowercase = model(lowerCAmelCase__ ) lowercase = outputs.logits print('''Logits:''' , logits[0, :3] ) print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] ) lowercase = timm_model(lowerCAmelCase__ ) assert timm_logits.shape == outputs.logits.shape assert 
torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) if push_to_hub: print(f'Pushing model {model_name} and processor to the hub' ) model.push_to_hub(f'ybelkada/{model_name}' ) processor.push_to_hub(f'ybelkada/{model_name}' ) if __name__ == "__main__": lowercase__ :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) lowercase__ :List[str] = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
101
'''simple docstring''' from __future__ import annotations def __lowercase ( __lowercase , __lowercase = None , __lowercase = None ) -> None: '''simple docstring''' if start is None: _A = 0 if end is None: _A = len(__lowercase ) - 1 if start >= end: return _A = (start + end) // 2 slowsort(__lowercase , __lowercase , __lowercase ) slowsort(__lowercase , mid + 1 , __lowercase ) if sequence[end] < sequence[mid]: _A , _A = sequence[mid], sequence[end] slowsort(__lowercase , __lowercase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
79
0
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance SCREAMING_SNAKE_CASE : List[str] = 6_378_137.0 SCREAMING_SNAKE_CASE : Tuple = 6_356_752.314_245 SCREAMING_SNAKE_CASE : Dict = 637_8137 def lowercase ( _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : Any = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __snake_case : List[Any] = atan((1 - flattening) * tan(radians(_snake_case ) ) ) __snake_case : str = atan((1 - flattening) * tan(radians(_snake_case ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __snake_case : Union[str, Any] = haversine_distance(_snake_case , _snake_case , _snake_case , _snake_case ) / EQUATORIAL_RADIUS # Intermediate P and Q values __snake_case : Union[str, Any] = (b_lata + b_lata) / 2 __snake_case : Any = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __snake_case : List[str] = (sin(_snake_case ) ** 2) * (cos(_snake_case ) ** 2) __snake_case : Optional[Any] = cos(sigma / 2 ) ** 2 __snake_case : Optional[int] = (sigma - sin(_snake_case )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __snake_case : Optional[Any] = (cos(_snake_case ) ** 2) * (sin(_snake_case ) ** 2) __snake_case : List[Any] = sin(sigma / 2 ) ** 2 __snake_case : Optional[int] = (sigma + sin(_snake_case )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
102
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = 
self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that 
outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) 
def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
79
0
import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values A__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') A__ , A__ : str = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') A__ : str = rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: A__ : int = rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) A__ : Optional[Any] = args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(['''pip 
install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
103
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = 
random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { 
"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, 
self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, 
TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : 
List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
# NOTE(review): this chunk begins mid-way through a test method defined before
# this view; the fragment below is that method's tail (an expected-slice
# comparison) and cannot be made fully self-contained from here.
expected_slice = tf.constant(
    [
        [
            [-0.12053341, -1.0264901, 0.29221946],
            [-1.5133783, 0.197433, 0.15190607],
            [-5.0135403, -3.900256, -0.84038764],
        ]
    ]
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Value checks for ``TFRoFormerSinusoidalPositionalEmbedding``."""

    # Absolute tolerance for all float comparisons in this class.
    tolerance = 1e-4

    def test_basic(self):
        """Embedding for a (1, 2) input shape matches hand-computed sinusoids."""
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emba(input_ids.shape)
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000],
                [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000],
            ]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        """First few weights of a 512-dim embedding match reference values."""
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        # Call once so the layer is built and ``weight`` exists.
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Value checks for ``TFRoFormerSelfAttention.apply_rotary_position_embeddings``."""

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # Deterministic query/key tensors of shape (2, 12, 16, 64).
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
79
0
"""Area of the largest all-ones square inside a binary matrix.

Four equivalent implementations: naive recursion, memoised recursion,
bottom-up dynamic programming, and a space-optimised bottom-up version.
"""


def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion over (row, col); exponential time, for illustration only.

    >>> largest_square_area_in_matrix_top_down(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # Base case: walked off the matrix.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # Square ending here extends the smallest of the three neighbours.
            sub_problem_sol = 1 + min(right, diagonal, down)
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    # One-element list so the nested function can mutate the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Memoised recursion: O(rows * cols) time and space.

    >>> largest_square_area_in_matrix_top_down_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        # -1 marks "not computed yet".
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min(right, diagonal, down)
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP: O(rows * cols) time, O(rows * cols) space.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    # Extra sentinel row/column of zeros avoids bounds checks.
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP keeping only two rows: O(rows * cols) time, O(cols) space.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy, not alias: aliasing would make `diagonal` read values already
        # overwritten in the current row and yield wrong answers
        # (e.g. [[1, 1], [1, 0]] would report 2 instead of 1).
        next_row = current_row.copy()
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
104
"""GPT-NeoX model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    """Configuration for GPT-NeoX models (e.g. ``EleutherAI/gpt-neox-20b``).

    Stores the architecture hyper-parameters; defaults match gpt-neox-20b.
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,          # fraction of head dims that get rotary embeddings
        rotary_emb_base=10000,    # base for the rotary frequency schedule
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,        # optional dict {"type": ..., "factor": ...}
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        # Head dimension must be integral for the attention reshape.
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the ``rope_scaling`` dict (``type`` in {linear, dynamic}, ``factor`` > 1).

        Raises:
            ValueError: if ``rope_scaling`` is malformed.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
79
0
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 a : str = get_tests_dir('''fixtures/dummy-config.json''') class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> Any: a : Tuple = 0 def __a ( self ) -> Union[str, Any]: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) ) def __a ( self ) -> Tuple: a : str = AutoConfig.from_pretrained("bert-base-uncased" ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> int: a : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Dict: a : int = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: a : List[str] = AutoConfig.for_model("roberta" ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> List[Any]: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
a : Any = os.path.join(lowerCAmelCase__ , "fake-roberta" ) os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , "config.json" ) , "w" ) as f: f.write(json.dumps({} ) ) a : str = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(type(lowerCAmelCase__ ) , lowerCAmelCase__ ) def __a ( self ) -> Dict: try: AutoConfig.register("custom" , lowerCAmelCase__ ) # Wrong model type will raise an error with self.assertRaises(lowerCAmelCase__ ): AutoConfig.register("model" , lowerCAmelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCAmelCase__ ): AutoConfig.register("bert" , lowerCAmelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API a : List[Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase__ ) a : int = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def __a ( self ) -> Optional[Any]: with self.assertRaisesRegex( lowerCAmelCase__ , "bert-base is not a local folder and is not a valid model identifier" ): a : str = AutoConfig.from_pretrained("bert-base" ) def __a ( self ) -> Optional[int]: with self.assertRaisesRegex( lowerCAmelCase__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): a : str = AutoConfig.from_pretrained(lowerCAmelCase__ , revision="aaaaaa" ) def __a ( self ) -> Tuple: with self.assertRaisesRegex( lowerCAmelCase__ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ): a : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" ) def __a ( self ) -> int: # If remote code is not set, we will time out when asking whether to load the model. 
with self.assertRaises(lowerCAmelCase__ ): a : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCAmelCase__ ): a : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ ) a : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase__ ) a : int = AutoConfig.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" ) def __a ( self ) -> Tuple: class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] ="""new-model""" try: AutoConfig.register("new-model" , lowerCAmelCase__ ) # If remote code is not set, the default is to use local a : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote code is disabled, we load the local one. a : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote is enabled, we load from the Hub a : Tuple = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
105
"""Adjust the contrast of a PIL image."""
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    ``level`` ranges roughly over (-255, 255); 0 leaves the image unchanged,
    positive values increase contrast, negative values flatten it.
    """
    # Standard contrast-correction factor for the given level.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Remap one channel value around the midpoint 128."""
        return int(128 + factor * (c - 128))

    # Image.point applies `contrast` to every pixel channel.
    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 1_70)
        cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
79
0
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __UpperCamelCase : List[str] = '''true''' def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=16 ): set_seed(42 ) lowerCAmelCase__ : Union[str, Any] = RegressionModel() lowerCAmelCase__ : Optional[int] = deepcopy(A_ ) lowerCAmelCase__ : Any = RegressionDataset(length=A_ ) lowerCAmelCase__ : List[str] = DataLoader(A_ , batch_size=A_ ) model.to(accelerator.device ) lowerCAmelCase__ ,lowerCAmelCase__ : Dict = accelerator.prepare(A_ , A_ ) return model, ddp_model, dataloader def __SCREAMING_SNAKE_CASE ( A_ , A_=False ): lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' ) lowerCAmelCase__ : List[str] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' ) def tokenize_function(A_ ): lowerCAmelCase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A_ , max_length=A_ ) return outputs with accelerator.main_process_first(): lowerCAmelCase__ : Dict = dataset.map( A_ , batched=A_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) lowerCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(A_ ): if use_longest: return tokenizer.pad(A_ , padding='''longest''' , return_tensors='''pt''' ) return tokenizer.pad(A_ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' ) return DataLoader(A_ , shuffle=A_ , collate_fn=A_ , batch_size=16 ) def __SCREAMING_SNAKE_CASE ( A_ , A_ ): lowerCAmelCase__ : Union[str, Any] = Accelerator(dispatch_batches=A_ , split_batches=A_ ) 
lowerCAmelCase__ : str = get_dataloader(A_ , not dispatch_batches ) lowerCAmelCase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(A_ , A_ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ : Union[str, Any] = [] for batch in dataloader: lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = batch.values() with torch.no_grad(): lowerCAmelCase__ : List[str] = model(A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : str = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) lowerCAmelCase__ ,lowerCAmelCase__ : int = [], [] for logit, targ in logits_and_targets: logits.append(A_ ) targs.append(A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = torch.cat(A_ ), torch.cat(A_ ) return logits, targs def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=False , A_=False , A_=16 ): lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = get_basic_setup(A_ , A_ , A_ ) lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = generate_predictions(A_ , A_ , A_ ) assert ( len(A_ ) == num_samples ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A_ )}' def __SCREAMING_SNAKE_CASE ( A_ = False , A_ = False ): lowerCAmelCase__ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' ) lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = get_mrpc_setup(A_ , A_ ) # First do baseline lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = setup['''no'''] model.to(A_ ) model.eval() for batch in dataloader: batch.to(A_ ) with torch.inference_mode(): lowerCAmelCase__ : Optional[int] = model(**A_ ) lowerCAmelCase__ : Dict = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=A_ , references=batch['''labels'''] ) lowerCAmelCase__ : 
Dict = metric.compute() # Then do distributed lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): lowerCAmelCase__ : Union[str, Any] = model(**A_ ) lowerCAmelCase__ : int = outputs.logits.argmax(dim=-1 ) lowerCAmelCase__ : int = batch['''labels'''] lowerCAmelCase__ ,lowerCAmelCase__ : int = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=A_ , references=A_ ) lowerCAmelCase__ : List[Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : List[str] = Accelerator(split_batches=A_ , dispatch_batches=A_ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(A_ , A_ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: lowerCAmelCase__ : Optional[Any] = Accelerator(split_batches=A_ , dispatch_batches=A_ ) if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(A_ , 99 ) 
accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''' ) lowerCAmelCase__ : List[str] = Accelerator() test_torch_metrics(A_ , 5_12 ) accelerator.state._reset_state() def __SCREAMING_SNAKE_CASE ( A_ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
106
"""Sylvester's sequence: 2, 3, 7, 43, 1807, ... where a(n+1) = a(n)^2 - a(n) + 1."""


def sylvester(number: int) -> int:
    """Return the ``number``-th term (1-indexed) of Sylvester's sequence.

    Raises:
        AssertionError: if ``number`` is not an ``int``.
        ValueError: if ``number`` < 1.

    >>> sylvester(8)
    113423713055421844361000443
    """
    assert isinstance(number, int), f'''The input value of [n={number}] is not an integer'''

    if number == 1:
        return 2
    if number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg)

    # a(n) = a(n-1) * (a(n-1) - 1) + 1
    num = sylvester(number - 1)
    lower = num - 1
    upper = num
    return lower * upper + 1


if __name__ == "__main__":
    print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
79
0
"""Vigenère-style substitution cipher over the uppercase alphabet.

Letters are shifted by the key (case preserved); non-letters pass through
unchanged and do not advance the key.
"""

LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    """Interactively encrypt or decrypt a message typed on stdin."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"""\n{mode.title()}ed message:""")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt ``message`` with ``key``."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt ``message`` with ``key``."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of ``message`` by the matching key letter.

    ``mode`` is ``"encrypt"`` (add the key shift) or ``"decrypt"`` (subtract).
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            # Preserve the case of the original symbol.
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through and do not consume a key letter.
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
107
"""Head-entropy / head-importance analysis and pruning for a GPT-2 LM.

NOTE(review): identifiers (``__lowercase``, ``_A``, ``GPTaLMHeadModel``,
``np.intaa``, ``print_ad_tensor``) look machine-mangled; every ``def`` repeats
the parameter name ``__lowercase``, which is a SyntaxError — the module cannot
run as-is. Also ``logger`` is used but the getLogger result is bound to
``lowerCamelCase_``. Code reproduced unchanged for review against the original
bertology-style pruning script.
"""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPTaLMHeadModel


lowerCamelCase_ = logging.getLogger(__name__)


def __lowercase ( __lowercase , __lowercase ) -> Optional[int]:
    """Save the model into ``output_dir``, replacing any previous checkpoint."""
    if os.path.exists(__lowercase ):
        if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile(
            os.path.join(__lowercase , "config.json" ) ):
            os.remove(os.path.join(__lowercase , "config.json" ) )
        if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile(
            os.path.join(__lowercase , "pytorch_model.bin" ) ):
            os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) )
    else:
        os.makedirs(__lowercase )
    model.save_pretrained(__lowercase )


def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]:
    """Entropy of an attention distribution along the last dimension."""
    _A = 2
    if unlogit:
        _A = torch.pow(__lowercase , __lowercase )
    _A = p * torch.log(__lowercase )
    _A = 0
    return -plogp.sum(dim=-1 )


def __lowercase ( __lowercase ) -> Optional[Any]:
    """Log a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) )
    for row in range(len(__lowercase ) ):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )


def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int:
    """Accumulate per-head attention entropy and gradient-based importance over a dataloader."""
    _A , _A = model.config.num_hidden_layers, model.config.num_attention_heads
    _A = torch.zeros(__lowercase , __lowercase ).to(args.device )
    _A = torch.zeros(__lowercase , __lowercase ).to(args.device )
    if head_mask is None:
        _A = torch.ones(__lowercase , __lowercase ).to(args.device )
    head_mask.requires_grad_(requires_grad=__lowercase )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        _A = None
    _A = 0.0
    _A = 0.0
    for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
        _A = tuple(t.to(args.device ) for t in inputs )
        ((_A) , ) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        _A = model(__lowercase , labels=__lowercase , head_mask=__lowercase )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        _A , _A , _A = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(__lowercase ):
                _A = entropy(attn.detach() , __lowercase )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        _A = 2
        _A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
    if not args.dont_normalize_global_importance:
        _A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies" )
        print_ad_tensor(__lowercase )
    if compute_importance:
        logger.info("Head importance scores" )
        print_ad_tensor(__lowercase )
    logger.info("Head ranked by importance scores" )
    _A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    _A = torch.arange(
        head_importance.numel() , device=args.device )
    _A = head_ranks.view_as(__lowercase )
    print_ad_tensor(__lowercase )
    return attn_entropy, head_importance, total_loss


def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]:
    """Iteratively mask the least important heads until the score drops below threshold."""
    _A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase )
    _A = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold )
    _A = torch.ones_like(__lowercase )
    _A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    _A = original_score
    while current_score >= original_score * args.masking_threshold:
        _A = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        _A = float("Inf" )
        _A = head_importance.view(-1 ).sort()[1]
        if len(__lowercase ) <= num_to_mask:
            print("BREAK BY num_to_mask" )
            break
        # mask heads
        _A = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
        _A = new_head_mask.view(-1 )
        _A = 0.0
        _A = new_head_mask.view_as(__lowercase )
        _A = new_head_mask.clone().detach()
        print_ad_tensor(__lowercase )
        # Compute metric and head importance again
        _A , _A , _A = compute_heads_importance(
            __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase )
        _A = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info("Final head mask" )
    print_ad_tensor(__lowercase )
    np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
    return head_mask


def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
    """Physically prune the masked heads and compare score/speed before and after."""
    _A = datetime.now()
    _A , _A , _A = compute_heads_importance(
        __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase )
    _A = 1 / loss
    _A = datetime.now() - before_time
    _A = sum(p.numel() for p in model.parameters() )
    _A = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(__lowercase , __lowercase ):
            _A = [
                v,
            ]
    assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(__lowercase )
    _A = sum(p.numel() for p in model.parameters() )
    _A = datetime.now()
    _A , _A , _A = compute_heads_importance(
        __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , )
    _A = 1 / loss
    _A = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , )
    logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase )
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
    save_model(__lowercase , args.output_dir )


def __lowercase ( ) -> Union[str, Any]:
    """CLI entry point: parse args, set up devices, run importance/mask/prune."""
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
    parser.add_argument(
        "--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , )
    # Other parameters
    parser.add_argument(
        "--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , )
    parser.add_argument(
        "--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold" , default=0.9 , type=__lowercase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length" , default=128 , type=__lowercase , help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) , )
    parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." )
    parser.add_argument("--seed" , type=__lowercase , default=42 )
    parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." )
    _A = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        _A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
        _A = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        _A = torch.device("cuda" , args.local_rank )
        _A = 1
        torch.distributed.init_process_group(backend="nccl" )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    _A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        _A = nn.parallel.DistributedDataParallel(
            __lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase )
    elif args.n_gpu > 1:
        _A = nn.DataParallel(__lowercase )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=__lowercase )
    torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) )
    logger.info("Training/evaluation parameters %s" , __lowercase )
    # Prepare dataset
    _A = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    _A = (torch.from_numpy(__lowercase ),)
    _A = TensorDataset(*__lowercase )
    _A = RandomSampler(__lowercase )
    _A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(__lowercase , __lowercase , __lowercase )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        _A = mask_heads(__lowercase , __lowercase , __lowercase )
        prune_heads(__lowercase , __lowercase , __lowercase , __lowercase )


if __name__ == "__main__":
    main()
79
0
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow lowerCAmelCase__ = logging.getLogger() @unittest.skip("Temporarily disable the doc tests." ) @require_torch @require_tf @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = True , ): """simple docstring""" lowerCAmelCase : Tuple = [file for file in os.listdir(snake_case__ ) if os.path.isfile(os.path.join(snake_case__ , snake_case__ ) )] if identifier is not None: lowerCAmelCase : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(snake_case__ , snake_case__ ): for n_ in n_identifier: lowerCAmelCase : Optional[int] = [file for file in files if n_ not in file] else: lowerCAmelCase : Union[str, Any] = [file for file in files if n_identifier not in file] lowerCAmelCase : Union[str, Any] = ignore_files or [] ignore_files.append("__init__.py" ) lowerCAmelCase : int = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , snake_case__ ) if only_modules: lowerCAmelCase : str = file.split("." )[0] try: lowerCAmelCase : List[Any] = getattr(snake_case__ , snake_case__ ) lowerCAmelCase : int = doctest.DocTestSuite(snake_case__ ) lowerCAmelCase : int = unittest.TextTestRunner().run(snake_case__ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f"""{module_identifier} is not a module.""" ) else: lowerCAmelCase : str = doctest.testfile(str(".." 
/ directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : str = Path("src/transformers" ) lowerCAmelCase : str = "modeling" lowerCAmelCase : Optional[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(snake_case__ , identifier=snake_case__ , ignore_files=snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Tuple = Path("src/transformers" ) lowerCAmelCase : List[str] = "tokenization" self.analyze_directory(snake_case__ , identifier=snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Optional[Any] = Path("src/transformers" ) lowerCAmelCase : List[Any] = "configuration" self.analyze_directory(snake_case__ , identifier=snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = Path("src/transformers" ) lowerCAmelCase : List[str] = ["configuration", "modeling", "tokenization"] self.analyze_directory(snake_case__ , n_identifier=snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Dict = Path("docs/source" ) lowerCAmelCase : int = ["favicon.ico"] self.analyze_directory(snake_case__ , ignore_files=snake_case__ , only_modules=snake_case__ )
108
"""Fast unit tests and slow GPU integration tests for the CycleDiffusion pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    # CycleDiffusion does not accept negative prompts or explicit output size.
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny model components so the pipeline runs quickly on CPU."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline under test."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map from [-1, 1] to [0, 1]
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
79
0
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Optional[int] = ort.SessionOptions() UpperCAmelCase : Dict = False return options def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) UpperCAmelCase : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) UpperCAmelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default UpperCAmelCase : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = """A red cat sitting on a park bench""" UpperCAmelCase : str = np.random.RandomState(0 ) UpperCAmelCase : Any = pipe( 
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_SCREAMING_SNAKE_CASE , output_type="""np""" , ) UpperCAmelCase : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
109
"""Lazy import structure for the Longformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Always importable: configuration and the slow tokenizer.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
0
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    """argparse factory that builds the `diffusers env` command instance."""
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    """Print environment information useful for bug reports (`diffusers-cli env`)."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render the info dict as a Markdown-ish bullet list."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
110
# Lint as: python3
"""Download manager replacement that serves pre-packaged dummy data for dataset tests."""
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    # name of the zip's payload directory
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # lazily resolved on first access
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch (or locate) and extract the dummy_data.zip, returning its payload path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map real download URLs to paths inside the dummy data archive."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # dummy data is pre-extracted; extraction is a no-op
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative_posix_path, file_object) pairs for members of an archive path."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield file paths, walking directories deterministically and skipping hidden files."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
0
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as fraction strings.

    Args:
        n_term: the number of terms, as a string (e.g. from ``input()``).
            An empty string yields an empty list.

    Returns:
        ``['1', '1/2', '1/3', ...]`` with ``int(n_term)`` entries.

    Raises:
        ValueError: if ``n_term`` is non-empty but not a valid integer literal.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # first term is printed as "1", subsequent terms as "1/k"
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
119
"""Jaccard similarity between two collections of hashable elements."""


def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return |A ∩ B| / |A ∪ B| for two sets (or lists/tuples).

    Args:
        set_a: first collection (``set``, ``list`` or ``tuple``).
        set_b: second collection of the same kind as ``set_a``.
        alternative_union: when True, divide by ``len(A) + len(B)`` instead of
            the size of the true union (counts shared elements twice).

    Returns:
        The similarity as a float, or ``None`` if the inputs are not both sets
        nor both list/tuple.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        # preserve duplicates/order semantics of the list variant
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            return len(intersection) / (len(set_a) + len(set_b))
        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
79
0
"""simple docstring""" from typing import List import numpy as np def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F"\t- key {key} has length {length}" for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) UpperCamelCase = max(lists_lengths.values() , default=0 ) return max(1 , __lowercase ) def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = [] for group_idx in range(__lowercase ): UpperCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break UpperCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 UpperCamelCase = range(__lowercase , start + num_shards_to_add ) shards_indices_per_group.append(__lowercase ) return shards_indices_per_group def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = _number_of_shards_in_gen_kwargs(__lowercase ) if num_shards == 1: return [dict(__lowercase )] else: UpperCamelCase = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__lowercase , __lowercase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__lowercase ) ) ] def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in 
gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __lowercase ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )} UpperCamelCase = {} for size in list_sizes: UpperCamelCase = list(range(__lowercase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes UpperCamelCase = dict(__lowercase ) for key, value in shuffled_kwargs.items(): if isinstance(__lowercase , __lowercase ): UpperCamelCase = [value[i] for i in indices_per_size[len(__lowercase )]] return shuffled_kwargs
153
"""Tests for accelerate's KwargsHandler dataclasses (grad scaler and DDP kwargs)."""
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-run this file under torchrun so the __main__ block below exercises DDP kwargs.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
79
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def snake_case ( self ): """simple docstring""" lowerCamelCase_ = 1 lowerCamelCase_ = 3 lowerCamelCase_ = (32, 32) lowerCamelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def snake_case ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def snake_case ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def snake_case ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , 
num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , ) return RobertaSeriesModelWithTransformation(__UpperCAmelCase ) @property def snake_case ( self ): """simple docstring""" def extract(*UpperCamelCase , **UpperCamelCase ): class snake_case : """simple docstring""" def __init__( self ): """simple docstring""" lowerCamelCase_ = torch.ones([0] ) def snake_case ( self , UpperCamelCase ): """simple docstring""" self.pixel_values.to(__UpperCAmelCase ) return self return Out() return extract def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ = self.dummy_cond_unet lowerCamelCase_ = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) lowerCamelCase_ = self.dummy_vae lowerCamelCase_ = self.dummy_text_encoder lowerCamelCase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) lowerCamelCase_ = 77 lowerCamelCase_ = self.dummy_image.to(__UpperCAmelCase ) lowerCamelCase_ = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk lowerCamelCase_ = AltDiffusionImgaImgPipeline( unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , ) lowerCamelCase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCAmelCase ) lowerCamelCase_ = alt_pipe.to(__UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCamelCase_ = "A painting of a squirrel eating a burger" lowerCamelCase_ = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) lowerCamelCase_ = alt_pipe( [prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__UpperCAmelCase , ) lowerCamelCase_ = output.images lowerCamelCase_ = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) lowerCamelCase_ = alt_pipe( [prompt] , 
generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0] lowerCamelCase_ = image[0, -3:, -3:, -1] lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.dummy_cond_unet lowerCamelCase_ = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) lowerCamelCase_ = self.dummy_vae lowerCamelCase_ = self.dummy_text_encoder lowerCamelCase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) lowerCamelCase_ = 77 lowerCamelCase_ = self.dummy_image.to(__UpperCAmelCase ) # put models in fp16 lowerCamelCase_ = unet.half() lowerCamelCase_ = vae.half() lowerCamelCase_ = bert.half() # make sure here that pndm scheduler skips prk lowerCamelCase_ = AltDiffusionImgaImgPipeline( unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , ) lowerCamelCase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCAmelCase ) lowerCamelCase_ = alt_pipe.to(__UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCamelCase_ = "A painting of a squirrel eating a burger" lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = alt_pipe( [prompt] , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=__UpperCAmelCase , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != "cuda" , "This test requires 
a GPU" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 lowerCamelCase_ = init_image.resize((760, 504) ) lowerCamelCase_ = "BAAI/AltDiffusion" lowerCamelCase_ = AltDiffusionImgaImgPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ = "A fantasy landscape, trending on artstation" lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=__UpperCAmelCase , output_type="np" , ) lowerCamelCase_ = output.images[0] lowerCamelCase_ = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) lowerCamelCase_ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowerCamelCase_ = init_image.resize((768, 512) ) lowerCamelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" ) lowerCamelCase_ = "BAAI/AltDiffusion" lowerCamelCase_ = AltDiffusionImgaImgPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) 
pipe.enable_attention_slicing() lowerCamelCase_ = "A fantasy landscape, trending on artstation" lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=__UpperCAmelCase , output_type="np" , ) lowerCamelCase_ = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1e-2
55
"""Project Euler problem 6: https://projecteuler.net/problem=6

Find the difference between the square of the sum and the sum of the
squares of the first ``n`` natural numbers.
"""


def solution(n: int = 100) -> int:
    """Return (1 + 2 + ... + n)^2 - (1^2 + 2^2 + ... + n^2).

    Uses the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6,
    so the answer is computed in O(1). Floor division is exact here because
    n(n+1) is always even and n(n+1)(2n+1) is always divisible by 6, so the
    result stays an exact int (the original float division risked rounding
    for large n).
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
79
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
323
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. 
If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. 
_A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in 
original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() 
trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
79
0
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( "pipelines_utils", "0.22.0", "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.", standard_warn=False, stacklevel=3, )
209
"""Configuration class for CANINE models."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Maps checkpoint identifiers on the Hub to their config URLs.
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class _UpperCAmelCase(PretrainedConfig):
    """Configuration for a CANINE model.

    Stores the hyperparameters of the character-level transformer plus the
    CANINE-specific downsampling/hashing settings. All arguments are optional
    and default to the values used by the `google/canine-s` checkpoint.

    NOTE(review): the original row declared every ``__init__`` parameter under
    the same mangled name (a duplicate-argument SyntaxError) and inherited from
    an undefined ``snake_case_``; parameter names are restored here in the same
    order as the original defaults, and the base class is the imported
    ``PretrainedConfig``.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Transformer backbone hyperparameters.
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config: how raw characters are downsampled/hashed before
        # the deep transformer stack.
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
79
0
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True iff ``pattern`` matches the WHOLE of ``input_string``.

    Supported pattern syntax (a subset of regular expressions):
      - ``.`` matches any single character;
      - ``x*`` matches zero or more repetitions of ``x`` (where ``x`` is a
        literal character or ``.``).

    Uses bottom-up dynamic programming: ``dp[i][j]`` is 1 when the first
    ``i`` characters of ``input_string`` match the first ``j`` characters
    of ``pattern``.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # Empty string matches empty pattern.
    dp[0][0] = 1
    # Empty pattern never matches a non-empty string.
    for i in range(1, len_string):
        dp[i][0] = 0
    # Empty string matches a pattern prefix only when every unit is "x*"
    # (each '*' can erase itself and the character before it).
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # Fill the table for all remaining prefix lengths.
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # "x*" consumed as zero occurrences.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # "x*" consumes one more occurrence of x.
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
313
'''simple docstring''' class _UpperCAmelCase : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : list[int] ): '''simple docstring''' _A = len(__UpperCAmelCase ) _A = [0] * len_array if len_array > 0: _A = array[0] for i in range(1 , __UpperCAmelCase ): _A = self.prefix_sum[i - 1] + array[i] def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ): '''simple docstring''' _A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCAmelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
79
0
"""Helpers for comparing installed package versions, e.g. to gate
torch-version-specific code paths."""

import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

# Parsed version of the installed torch, computed once at import time so
# repeated is_torch_version() calls do not re-query package metadata.
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library's version against ``requirement_version``.

    Args:
        library_or_version: a library name (its installed version is looked
            up via ``importlib.metadata``) or an already-parsed ``Version``.
        operation: comparison operator as a string; must be a key of
            ``STR_OPERATION_TO_FUNC`` (e.g. ``">="``).
        requirement_version: version string to compare against.

    Raises:
        ValueError: if ``operation`` is not a supported comparison.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}")
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """Compare the installed torch version against ``version`` using ``operation``."""
    return compare_versions(torch_version, operation, version)
321
'''simple docstring''' from typing import List import numpy as np def __lowercase ( __lowercase ) -> int: '''simple docstring''' _A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) _A = max(lists_lengths.values() , default=0 ) return max(1 , __lowercase ) def __lowercase ( __lowercase , __lowercase ) -> List[range]: '''simple docstring''' _A = [] for group_idx in range(__lowercase ): _A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _A = range(__lowercase , start + num_shards_to_add ) shards_indices_per_group.append(__lowercase ) return shards_indices_per_group def __lowercase ( __lowercase , __lowercase ) -> List[dict]: '''simple docstring''' _A = _number_of_shards_in_gen_kwargs(__lowercase ) if num_shards == 1: return [dict(__lowercase )] else: _A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__lowercase , __lowercase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__lowercase ) ) ] def __lowercase ( __lowercase ) -> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __lowercase ) else 
gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __lowercase ( __lowercase , __lowercase ) -> dict: '''simple docstring''' _A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )} _A = {} for size in list_sizes: _A = list(range(__lowercase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _A = dict(__lowercase ) for key, value in shuffled_kwargs.items(): if isinstance(__lowercase , __lowercase ): _A = [value[i] for i in indices_per_size[len(__lowercase )]] return shuffled_kwargs
79
0
"""Convenience re-exports for the (legacy) `transformers.data.datasets` package."""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
91
"""Lazy-import setup for the Jukebox model (configuration, tokenizer, and torch model).

Fixes vs. original:
- the torch-only symbol list *overwrote* the import-structure dict instead of
  extending it, which dropped the config/tokenizer entries;
- `_LazyModule` was called with an undefined name (`_import_structure`);
- the lazy module object was assigned to a throwaway variable instead of being
  installed into `sys.modules`, so lazy loading never took effect.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Modules importable without torch.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Extend (not replace) the structure with the torch-dependent modeling symbols.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
0
"""Versatile Diffusion pipeline exports.

Falls back to dummy placeholder objects when the optional dependencies
(torch and transformers >= 4.25.0) are not installed, so that importing the
package never fails outright.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # All four pipelines need both torch and a recent-enough transformers.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy objects raise an informative error on instantiation instead of at import time.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
60
"""VQ-Diffusion text-to-image pipeline.

Fixes vs. original (which did not parse): both classes had been renamed to the
same identifier `_UpperCAmelCase` (the second shadowed the first and the
`LearnedClassifierFreeSamplingEmbeddings` annotation was undefined), every
`__init__` had duplicate parameter names (a SyntaxError), and the mangled
import `TransformeraDModel` is restored to `Transformer2DModel`.
"""
from typing import Callable, List, Optional, Tuple, Union

import torch

from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Stores the (optionally learnable) CLIP embeddings used as the "unconditional"
    text input for classifier-free guidance."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion.

    Components:
        vqvae: VQ-VAE used to decode latent codebook indices into images.
        text_encoder / tokenizer: frozen CLIP text encoder and its tokenizer.
        transformer: conditional Transformer2DModel denoising the discrete latents.
        scheduler: VQDiffusionScheduler driving the reverse diffusion.
        learned_classifier_free_sampling_embeddings: embeddings for the
            unconditional branch of classifier-free guidance.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Encode `prompt` with CLIP; returns [uncond; cond] embeddings when guidance is on."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        """Run the full denoising loop and decode the result to images.

        Raises ValueError for a malformed `prompt`, `callback_steps`, or user-supplied `latents`.
        """
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # Index of the special "masked" codebook entry.
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to log(0) = -inf) the low-probability tail of each distribution,
        keeping the smallest set of most-likely classes whose cumulative probability
        reaches `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # Undo the sort so the mask lines up with the original class ordering.
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
79
0
"""Convert a fairseq XGLM checkpoint (`model.pt`) to a Hugging Face `XGLMForCausalLM`.

Fixes vs. original: all three helpers were renamed to the same identifier
(`_SCREAMING_SNAKE_CASE`, so only the last survived), their bodies referenced
the undefined name `__lowercase`, and the tied `lm_head` built by
`make_linear_from_emb` was dropped instead of being assigned to the model.
"""
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only bookkeeping keys (mutates `state_dict` in place)."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # Second arg makes pop a no-op when the key is absent.
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq checkpoint from `checkpoint_path` and return an `XGLMForCausalLM`."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq prefixes decoder weights with "decoder."; HF uses "model.".
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the output projection was removed above and is rebuilt below.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
247
"""Convert a timm ViT-hybrid (BiT backbone + ViT) checkpoint to the Hugging Face format.

Fixes vs. original: every helper had been renamed to the same identifier
(`__lowercase`), so helpers shadowed each other, their bodies referenced the
undefined name `__lowercase`, the `__main__` block called the undefined
`convert_vit_checkpoint`, and `read_in_q_k_v` discarded the q/k/v slices
(assigned to a throwaway `_A`) instead of writing them into the state dict.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping timm parameter names to HF names."""
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))
    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        # Only the first block of each stage has a downsample path.
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate HF query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the timm classification head (mutates `state_dict` in place)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm `vit_name` weights into our ViT-hybrid structure,
    verify outputs against timm, then optionally save and/or push to the hub."""
    # define default ViT hybrid configuration (BiT R50 backbone, 384px, ImageNet-1k head)
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring the timm preprocessing pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
79
0