Dataset schema (five columns; each row below is one example):

    column                   type     range
    code                     string   lengths 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   lengths 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1
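Each row pairs a `code` string with a `style_context` string, each tagged with a numeric style ID; in every sample row shown below, `label` is 1 exactly when `code_codestyle` equals `style_context_codestyle`. As a minimal sketch of how rows with this schema could be inspected with the `datasets` library (the dataset path is a hypothetical placeholder, not the real identifier):

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical placeholder ID, not the actual dataset.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(len(row["code"]), len(row["style_context"]))            # string lengths, per the schema above
print(row["code_codestyle"], row["style_context_codestyle"])  # numeric style IDs (0-371 / 0-349)
print(row["label"])  # in the sample rows, 1 iff the two style IDs match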
Example 1

code:

def UpperCamelCase__( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int )->int:
    if index == number_of_items:
        return 0
    A__ = 0
    A__ = 0
    A__ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , index + 1 )
    if weights[index] <= max_weight:
        A__ = values[index] + knapsack(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , max_weight - weights[index] , index + 1 )
    return max(UpperCamelCase__ , UpperCamelCase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

code_codestyle: 193

style_context:

from __future__ import annotations

import math


def UpperCamelCase__( UpperCamelCase__ : int )->bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def UpperCamelCase__( UpperCamelCase__ : int )->list[int]:
    A__ = str(UpperCamelCase__ )
    A__ = [n]
    for i in range(1 , len(UpperCamelCase__ ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums


def UpperCamelCase__( UpperCamelCase__ : int )->bool:
    if len(str(UpperCamelCase__ ) ) > 3:
        if not is_prime(int(str(UpperCamelCase__ )[-3:] ) ) or not is_prime(int(str(UpperCamelCase__ )[:3] ) ):
            return False
    return True


def UpperCamelCase__( UpperCamelCase__ : int = 11 )->list[int]:
    A__ = []
    A__ = 13
    while len(UpperCamelCase__ ) != count:
        if validate(UpperCamelCase__ ):
            A__ = list_truncated_nums(UpperCamelCase__ )
            if all(is_prime(UpperCamelCase__ ) for i in list_nums ):
                list_truncated_primes.append(UpperCamelCase__ )
        num += 2
    return list_truncated_primes


def UpperCamelCase__( )->int:
    return sum(compute_truncated_primes(11 ) )


if __name__ == "__main__":
    print(F"{sum(compute_truncated_primes(11)) = }")

style_context_codestyle: 193

label: 1
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def a__ ( ) -> Generator[int, None, None]: UpperCAmelCase__ : dict[int, int] = {} UpperCAmelCase__ : Tuple = 2 while True: UpperCAmelCase__ : str = factor_map.pop(lowerCAmelCase , lowerCAmelCase ) if factor: UpperCAmelCase__ : str = factor + prime while x in factor_map: x += factor UpperCAmelCase__ : Dict = factor else: UpperCAmelCase__ : str = prime yield prime prime += 1 def a__ ( lowerCAmelCase = 1E10 ) -> int: UpperCAmelCase__ : Optional[int] = sieve() UpperCAmelCase__ : Union[str, Any] = 1 while True: UpperCAmelCase__ : Tuple = next(lowerCAmelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowerCAmelCase ) n += 2 if __name__ == "__main__": print(solution())
370
"""simple docstring""" import numpy # List of input, output pairs _A = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) _A = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) _A = [2, 4, 1, 5] _A = len(train_data) _A = 0.009 def a__ ( lowerCAmelCase , lowerCAmelCase="train" ) -> int: return calculate_hypothesis_value(lowerCAmelCase , lowerCAmelCase ) - output( lowerCAmelCase , lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> int: UpperCAmelCase__ : Dict = 0 for i in range(len(lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[int]: if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Any: if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def a__ ( lowerCAmelCase , lowerCAmelCase=m ) -> List[str]: UpperCAmelCase__ : Optional[int] = 0 for i in range(lowerCAmelCase ): if index == -1: summation_value += _error(lowerCAmelCase ) else: summation_value += _error(lowerCAmelCase ) * train_data[i][0][index] return summation_value def a__ ( lowerCAmelCase ) -> List[Any]: UpperCAmelCase__ : int = summation_of_cost_derivative(lowerCAmelCase , lowerCAmelCase ) / m return cost_derivative_value def a__ ( ) -> List[str]: global parameter_vector # Tune these values to set a tolerance value for predicted output UpperCAmelCase__ : Any = 0.00_0002 UpperCAmelCase__ : Optional[int] = 0 UpperCAmelCase__ : Union[str, Any] = 0 while True: j += 1 UpperCAmelCase__ : str = [0, 0, 0, 0] for i in range(0 , len(lowerCAmelCase ) ): UpperCAmelCase__ : List[Any] = get_cost_derivative(i - 1 ) UpperCAmelCase__ : Tuple = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( lowerCAmelCase , lowerCAmelCase , atol=lowerCAmelCase , rtol=lowerCAmelCase , ): break UpperCAmelCase__ : Any = temp_parameter_vector print(("""Number of iterations:""", j) ) def a__ ( ) -> Optional[int]: for i in range(len(lowerCAmelCase ) ): print(("""Actual output value:""", output(lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
166
0
Example 3

code:

'''simple docstring'''

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    __snake_case =None

__snake_case =logging.get_logger(__name__)

__snake_case ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

__snake_case ={
    """vocab_file""": {
        """facebook/mbart-large-en-ro""": (
            """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
        ),
        """facebook/mbart-large-cc25""": (
            """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
        """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
    },
}

__snake_case ={
    """facebook/mbart-large-en-ro""": 1_024,
    """facebook/mbart-large-cc25""": 1_024,
}

# fmt: off
__snake_case =["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]


class UpperCAmelCase_ ( __lowercase ):
    lowerCamelCase : Dict = VOCAB_FILES_NAMES
    lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
    lowerCamelCase : List[Any] = MBartTokenizer
    lowerCamelCase : List[int] = []
    lowerCamelCase : List[int] = []

    def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : str="<s>" , UpperCAmelCase__ : str="<unk>" , UpperCAmelCase__ : Dict="<pad>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : str , ) -> List[str]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
        super().__init__(
            vocab_file=UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
        lowerCAmelCase = vocab_file
        lowerCAmelCase = False if not self.vocab_file else True
        lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        lowerCAmelCase = {
            lang_code: self.convert_tokens_to_ids(UpperCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        lowerCAmelCase = src_lang if src_lang is not None else 'en_XX'
        lowerCAmelCase = self.convert_tokens_to_ids(self._src_lang )
        lowerCAmelCase = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def __UpperCAmelCase ( self : int ) -> str:
        return self._src_lang

    @src_lang.setter
    def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : str ) -> None:
        lowerCAmelCase = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] , **UpperCAmelCase__ : Tuple ) -> Tuple:
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        lowerCAmelCase = src_lang
        lowerCAmelCase = self(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
        lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
        lowerCAmelCase = tgt_lang_id
        return inputs

    def __UpperCAmelCase ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str = "en_XX" , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : str = "ro_RO" , **UpperCAmelCase__ : Dict , ) -> BatchEncoding:
        lowerCAmelCase = src_lang
        lowerCAmelCase = tgt_lang
        return super().prepare_seqaseq_batch(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )

    def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
        return self.set_src_lang_special_tokens(self.src_lang )

    def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : int ) -> None:
        lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
        lowerCAmelCase = []
        lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
        lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
        lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
        lowerCAmelCase = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : str ) -> None:
        lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
        lowerCAmelCase = []
        lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
        lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
        lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
        lowerCAmelCase = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(UpperCAmelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        lowerCAmelCase = os.path.join(
            UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
            copyfile(self.vocab_file , UpperCAmelCase__ )
        return (out_vocab_file,)

code_codestyle: 4

style_context:

"""simple docstring"""

import sys
from collections import defaultdict


class lowerCamelCase__ :
    def __init__( self ):
        """simple docstring"""
        snake_case : Dict = []

    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        return self.node_position[vertex]

    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        snake_case : Dict = pos

    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                snake_case : Any = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    snake_case : Any = 2 * start + 1
                else:
                    snake_case : Union[str, Any] = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                snake_case , snake_case : Dict = heap[smallest_child], positions[smallest_child]
                snake_case , snake_case : Any = (
                    heap[start],
                    positions[start],
                )
                snake_case , snake_case : str = temp, tempa
                snake_case : Dict = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , SCREAMING_SNAKE_CASE )
                self.top_to_bottom(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        snake_case : Optional[Any] = position[index]
        while index != 0:
            snake_case : Dict = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                snake_case : Tuple = heap[parent]
                snake_case : str = position[parent]
                self.set_position(position[parent] , SCREAMING_SNAKE_CASE )
            else:
                snake_case : Union[str, Any] = val
                snake_case : List[Any] = temp
                self.set_position(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                break
            snake_case : Optional[Any] = parent
        else:
            snake_case : Optional[int] = val
            snake_case : List[Any] = temp
            self.set_position(SCREAMING_SNAKE_CASE , 0 )

    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        snake_case : List[str] = len(SCREAMING_SNAKE_CASE ) // 2 - 1
        for i in range(SCREAMING_SNAKE_CASE , -1 , -1 ):
            self.top_to_bottom(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        snake_case : Union[str, Any] = positions[0]
        snake_case : List[str] = sys.maxsize
        self.top_to_bottom(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
        return temp


def UpperCamelCase__ ( lowercase__ : Union[str, Any] ):
    snake_case : Tuple = Heap()
    snake_case : List[str] = [0] * len(lowercase__ )
    snake_case : Optional[int] = [-1] * len(lowercase__ )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    snake_case : Optional[int] = []  # Heap of Distance of vertices from their neighboring vertex
    snake_case : List[Any] = []
    for vertex in range(len(lowercase__ ) ):
        distance_tv.append(sys.maxsize )
        positions.append(lowercase__ )
        heap.node_position.append(lowercase__ )
    snake_case : Optional[int] = []
    snake_case : Union[str, Any] = 1
    snake_case : Union[str, Any] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        snake_case : List[Any] = 0
        snake_case : Tuple = distance
    heap.heapify(lowercase__ , lowercase__ )
    for _ in range(1 , len(lowercase__ ) ):
        snake_case : Optional[Any] = heap.delete_minimum(lowercase__ , lowercase__ )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            snake_case : Optional[int] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(lowercase__ )]
                ):
                    snake_case : str = distance
                    heap.bottom_to_top(
                        lowercase__ , heap.get_position(lowercase__ ) , lowercase__ , lowercase__ )
                    snake_case : Optional[int] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    __A = int(input("Enter number of edges: ").strip())
    __A = defaultdict(list)
    for _ in range(edges_number):
        __A = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))

style_context_codestyle: 148

label: 0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Dict = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
316
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a_ ( snake_case_ ): '''simple docstring''' lowerCamelCase__ : Dict = ['image_processor', 'tokenizer'] lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor' lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ : str = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', lowerCamelCase_, ) lowerCamelCase__ : int = kwargs.pop('feature_extractor' ) lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowerCamelCase_, lowerCamelCase_ ) def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ): '''simple docstring''' if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ ) if images is not None: lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ ) if text is not None and images is not None: lowerCamelCase__ : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ ) def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ ) def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ ) @property def a__ (self ): '''simple docstring''' lowerCamelCase__ : str = self.tokenizer.model_input_names lowerCamelCase__ : List[str] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
316
1
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Union[str, Any] = ["""GPTNeoXTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXForCausalLM""", """GPTNeoXForQuestionAnswering""", """GPTNeoXForSequenceClassification""", """GPTNeoXForTokenClassification""", """GPTNeoXLayer""", """GPTNeoXModel""", """GPTNeoXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
291
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase : Any = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowerCAmelCase : Any = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowerCAmelCase : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _lowerCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None , _a="warn" , ): """simple docstring""" lowerCamelCase = recall_score( _a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a , zero_division=_a , ) return {"recall": float(_a ) if score.size == 1 else score}
291
1
Example 6

code:

'''simple docstring'''

import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class a__ ( __A , unittest.TestCase ):
    """simple docstring"""

    __UpperCamelCase : str = DebertaTokenizer
    __UpperCamelCase : str = True
    __UpperCamelCase : Any = DebertaTokenizerFast

    def _snake_case (self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __lowerCAmelCase = [
            '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''',
            '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''',
            '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''',
        ]
        __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        __lowerCAmelCase = {'''unk_token''': '''[UNK]'''}
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowercase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowercase ) )

    def _snake_case (self , **__lowercase ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )

    def _snake_case (self , __lowercase ):
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = '''lower newer'''
        return input_text, output_text

    def _snake_case (self ):
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        __lowerCAmelCase = tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __lowerCAmelCase = tokens + [tokenizer.unk_token]
        __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )

    def _snake_case (self ):
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = tokenizer('''Hello''' , '''World''' )
        __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , __lowercase )

    @slow
    def _snake_case (self ):
        __lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase )
        __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase )
        __lowerCAmelCase = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
        __lowerCAmelCase = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
        __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase )
        __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def _snake_case (self ):
        __lowerCAmelCase = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            __lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            __lowerCAmelCase = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            __lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase )
            __lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']]
            # fmt: off
            __lowerCAmelCase = {
                '''input_ids''': [
                    [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
                ],
                '''token_type_ids''': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                '''attention_mask''': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            __lowerCAmelCase = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            self.assertDictEqual(encoding.data , __lowercase )
            for expected, decoded in zip(__lowercase , __lowercase ):
                self.assertEqual(__lowercase , __lowercase )

code_codestyle: 9

style_context:

'''simple docstring'''

import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


_UpperCAmelCase : Dict = """true"""


def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6):
    set_seed(4_2)
    __lowerCAmelCase = RegressionModel()
    __lowerCAmelCase = deepcopy(lowerCamelCase)
    __lowerCAmelCase = RegressionDataset(length=lowerCamelCase)
    __lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase)
    model.to(accelerator.device)
    __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
    return model, ddp_model, dataloader


def __magic_name__( lowerCamelCase, lowerCamelCase=False):
    __lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    __lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''')

    def tokenize_function(lowerCamelCase):
        __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase)
        return outputs

    with accelerator.main_process_first():
        __lowerCAmelCase = dataset.map(
            lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(lowerCamelCase):
        if use_longest:
            return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''')

    return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6)


def __magic_name__( lowerCamelCase, lowerCamelCase):
    __lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase)
    __lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches)
    __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase)
    __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
    __lowerCAmelCase = []
    for batch in dataloader:
        __lowerCAmelCase , __lowerCAmelCase = batch.values()
        with torch.no_grad():
            __lowerCAmelCase = model(lowerCamelCase)
        __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    __lowerCAmelCase , __lowerCAmelCase = [], []
    for logit, targ in logits_and_targets:
        logits.append(lowerCamelCase)
        targs.append(lowerCamelCase)
    __lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase)
    return logits, targs


def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6):
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase)
    __lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase)
    assert (
        len(lowerCamelCase) == num_samples
    ), F"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(lowerCamelCase)}"""


def __magic_name__( lowerCamelCase = False, lowerCamelCase = False):
    __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''')
    __lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase)
    # First do baseline
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no''']
    model.to(lowerCamelCase)
    model.eval()
    for batch in dataloader:
        batch.to(lowerCamelCase)
        with torch.inference_mode():
            __lowerCAmelCase = model(**lowerCamelCase)
        __lowerCAmelCase = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=lowerCamelCase, references=batch['''labels'''])
    __lowerCAmelCase = metric.compute()
    # Then do distributed
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            __lowerCAmelCase = model(**lowerCamelCase)
        __lowerCAmelCase = outputs.logits.argmax(dim=-1)
        __lowerCAmelCase = batch['''labels''']
        __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase)
    __lowerCAmelCase = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""


def __magic_name__( ):
    __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(lowerCamelCase, lowerCamelCase)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(lowerCamelCase, 9_9)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    __lowerCAmelCase = Accelerator()
    test_torch_metrics(lowerCamelCase, 5_1_2)
    accelerator.state._reset_state()


def __magic_name__( lowerCamelCase):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()

style_context_codestyle: 9

label: 1
Example 7

code:

'''simple docstring'''

from __future__ import annotations

import queue


class _snake_case :
    def __init__( self , _lowerCamelCase):
        UpperCAmelCase__ : int = data
        UpperCAmelCase__ : Tuple = None
        UpperCAmelCase__ : Optional[int] = None


def _UpperCamelCase ( ):
    print("""\n********Press N to stop entering at any point of time********\n""" )
    UpperCAmelCase__ : int = input("""Enter the value of the root node: """ ).strip().lower()
    UpperCAmelCase__ : Any = queue.Queue()
    UpperCAmelCase__ : str = TreeNode(int(A__ ) )
    q.put(A__ )
    while not q.empty():
        UpperCAmelCase__ : Optional[int] = q.get()
        UpperCAmelCase__ : str = f'''Enter the left node of {node_found.data}: '''
        UpperCAmelCase__ : Tuple = input(A__ ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        UpperCAmelCase__ : List[Any] = TreeNode(int(A__ ) )
        UpperCAmelCase__ : Dict = left_node
        q.put(A__ )
        UpperCAmelCase__ : Dict = f'''Enter the right node of {node_found.data}: '''
        UpperCAmelCase__ : int = input(A__ ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        UpperCAmelCase__ : Union[str, Any] = TreeNode(int(A__ ) )
        UpperCAmelCase__ : List[Any] = right_node
        q.put(A__ )
    raise


def _UpperCamelCase ( UpperCamelCase__ ):
    if not isinstance(A__ , A__ ) or not node:
        return
    print(node.data , end=""",""" )
    pre_order(node.left )
    pre_order(node.right )


def _UpperCamelCase ( UpperCamelCase__ ):
    if not isinstance(A__ , A__ ) or not node:
        return
    in_order(node.left )
    print(node.data , end=""",""" )
    in_order(node.right )


def _UpperCamelCase ( UpperCamelCase__ ):
    if not isinstance(A__ , A__ ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=""",""" )


def _UpperCamelCase ( UpperCamelCase__ ):
    if not isinstance(A__ , A__ ) or not node:
        return
    UpperCAmelCase__ : Dict = queue.Queue()
    q.put(A__ )
    while not q.empty():
        UpperCAmelCase__ : Dict = q.get()
        print(node_dequeued.data , end=""",""" )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )


def _UpperCamelCase ( UpperCamelCase__ ):
    if not isinstance(A__ , A__ ) or not node:
        return
    UpperCAmelCase__ : Any = queue.Queue()
    q.put(A__ )
    while not q.empty():
        UpperCAmelCase__ : Optional[int] = []
        while not q.empty():
            UpperCAmelCase__ : Tuple = q.get()
            print(node_dequeued.data , end=""",""" )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(A__ )


def _UpperCamelCase ( UpperCamelCase__ ):
    if not isinstance(A__ , A__ ) or not node:
        return
    UpperCAmelCase__ : str = []
    UpperCAmelCase__ : str = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=""",""" )
            stack.append(A__ )
            UpperCAmelCase__ : Union[str, Any] = n.left
        # end of while means current node doesn't have left child
        UpperCAmelCase__ : Dict = stack.pop()
        # start to traverse its right child
        UpperCAmelCase__ : Optional[int] = n.right


def _UpperCamelCase ( UpperCamelCase__ ):
    if not isinstance(A__ , A__ ) or not node:
        return
    UpperCAmelCase__ : List[Any] = []
    UpperCAmelCase__ : str = node
    while n or stack:
        while n:
            stack.append(A__ )
            UpperCAmelCase__ : Union[str, Any] = n.left
        UpperCAmelCase__ : Tuple = stack.pop()
        print(n.data , end=""",""" )
        UpperCAmelCase__ : int = n.right


def _UpperCamelCase ( UpperCamelCase__ ):
    if not isinstance(A__ , A__ ) or not node:
        return
    UpperCAmelCase__ , UpperCAmelCase__ : Any = [], []
    UpperCAmelCase__ : Optional[Any] = node
    stacka.append(A__ )
    while stacka:  # to find the reversed order of post order, store it in stack2
        UpperCAmelCase__ : Union[str, Any] = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stacka.append(A__ )
    while stacka:  # pop up from stack2 will be the post order
        print(stacka.pop().data , end=""",""" )


def _UpperCamelCase ( UpperCamelCase__ = "" , UpperCamelCase__=5_0 , UpperCamelCase__="*" ):
    if not s:
        return "\n" + width * char
    UpperCAmelCase__ , UpperCAmelCase__ : str = divmod(width - len(A__ ) - 2 , 2 )
    return f'''{left * char} {s} {(left + extra) * char}'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt('Binary Tree Traversals'))

    __A =build_tree()
    print(prompt('Pre Order Traversal'))
    pre_order(node)
    print(prompt() + '\n')

    print(prompt('In Order Traversal'))
    in_order(node)
    print(prompt() + '\n')

    print(prompt('Post Order Traversal'))
    post_order(node)
    print(prompt() + '\n')

    print(prompt('Level Order Traversal'))
    level_order(node)
    print(prompt() + '\n')

    print(prompt('Actual Level Order Traversal'))
    level_order_actual(node)
    print('*' * 50 + '\n')

    print(prompt('Pre Order Traversal - Iteration Version'))
    pre_order_iter(node)
    print(prompt() + '\n')

    print(prompt('In Order Traversal - Iteration Version'))
    in_order_iter(node)
    print(prompt() + '\n')

    print(prompt('Post Order Traversal - Iteration Version'))
    post_order_iter(node)
    print(prompt())

code_codestyle: 163

style_context:

import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


UpperCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase__:
    def __init__( self: Any , UpperCamelCase_: str , UpperCamelCase_: Dict ):
        __lowerCamelCase = question_encoder
        __lowerCamelCase = generator
        __lowerCamelCase = self.question_encoder

    def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[Any] ):
        if os.path.isfile(UpperCamelCase_ ):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
        __lowerCamelCase = os.path.join(UpperCamelCase_ , """question_encoder_tokenizer""" )
        __lowerCamelCase = os.path.join(UpperCamelCase_ , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(UpperCamelCase_ )
        self.generator.save_pretrained(UpperCamelCase_ )

    @classmethod
    def lowerCAmelCase__ ( cls: List[Any] , UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        __lowerCamelCase = kwargs.pop("""config""" , UpperCamelCase_ )
        if config is None:
            __lowerCamelCase = RagConfig.from_pretrained(UpperCamelCase_ )
        __lowerCamelCase = AutoTokenizer.from_pretrained(
            UpperCamelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
        __lowerCamelCase = AutoTokenizer.from_pretrained(
            UpperCamelCase_ , config=config.generator , subfolder="""generator_tokenizer""" )
        return cls(question_encoder=UpperCamelCase_ , generator=UpperCamelCase_ )

    def __call__( self: Tuple , *UpperCamelCase_: int , **UpperCamelCase_: int ):
        return self.current_tokenizer(*UpperCamelCase_ , **UpperCamelCase_ )

    def lowerCAmelCase__ ( self: Tuple , *UpperCamelCase_: List[Any] , **UpperCamelCase_: List[Any] ):
        return self.generator.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )

    def lowerCAmelCase__ ( self: Optional[Any] , *UpperCamelCase_: str , **UpperCamelCase_: Union[str, Any] ):
        return self.generator.decode(*UpperCamelCase_ , **UpperCamelCase_ )

    def lowerCAmelCase__ ( self: str ):
        __lowerCamelCase = self.question_encoder

    def lowerCAmelCase__ ( self: Optional[int] ):
        __lowerCamelCase = self.generator

    def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "longest" , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , **UpperCamelCase_: int , ):
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" , UpperCamelCase_ , )
        if max_length is None:
            __lowerCamelCase = self.current_tokenizer.model_max_length
        __lowerCamelCase = self(
            UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            __lowerCamelCase = self.current_tokenizer.model_max_length
        __lowerCamelCase = self(
            text_target=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
        __lowerCamelCase = labels["""input_ids"""]
        return model_inputs

style_context_codestyle: 12

label: 0
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = XLMRobertaTokenizer lowerCAmelCase__ = XLMRobertaTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def lowercase_ ( self : Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = '''<pad>''' UpperCAmelCase__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_A ) , 1_002 ) def lowercase_ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_002 ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = XLMRobertaTokenizer(_A , keep_accents=_A ) UpperCAmelCase__ : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowercase_ ( self : str ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase__ : List[str] = (self.rust_tokenizer_class, 
'''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : List[str] = tempfile.mkdtemp() UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(_A ) UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) UpperCAmelCase__ : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Any = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : List[str] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_A ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) @cached_property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def lowercase_ ( self : Any ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_A , f.name ) UpperCAmelCase__ : int = XLMRobertaTokenizer(f.name , keep_accents=_A ) UpperCAmelCase__ : str = pickle.dumps(_A ) pickle.loads(_A ) def lowercase_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase__ : Dict = '''I was born in 92000, and this is falsé.''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : int = 
tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : Any = self.get_rust_tokenizer() UpperCAmelCase__ : List[Any] = tokenizer.encode(_A ) UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = '''Hello World!''' UpperCAmelCase__ : Tuple = [0, 35_378, 6_661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase__ : Any = [ 0, 3_293, 83, 10, 4_552, 4_989, 7_986, 678, 10, 5_915, 111, 179_459, 124_850, 4, 6_044, 237, 12, 6, 5, 6, 4, 6_780, 705, 15, 1_388, 44, 378, 10_114, 711, 152, 20, 6, 5, 22_376, 642, 1_221, 15_190, 34_153, 450, 5_608, 959, 1_119, 57_702, 136, 186, 47, 1_098, 29_367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_044, 237, 6_284, 50_901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
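The sample above tests save/reload parity between the slow (Python) and fast (Rust) XLM-RoBERTa tokenizers, including the legacy_format switch. A minimal sketch of the same round-trip check outside the test harness (the checkpoint name is illustrative; the sample sentence is the one used in the test):

import tempfile

from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast

text = "I was born in 92000, and this is falsé."
slow = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
fast = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")

with tempfile.TemporaryDirectory() as tmp:
    fast.save_pretrained(tmp)  # writes tokenizer.json alongside the sentencepiece files
    reloaded = XLMRobertaTokenizerFast.from_pretrained(tmp)

# All three tokenizers should agree on the same input after the round trip.
assert slow.encode(text) == fast.encode(text) == reloaded.encode(text)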
369
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ): '''simple docstring''' UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : List[str] = max_resolution UpperCAmelCase__ : Tuple = do_resize UpperCAmelCase__ : Union[str, Any] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : Union[str, Any] = image_mean UpperCAmelCase__ : Optional[int] = image_std UpperCAmelCase__ : Dict = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : int = do_pad def lowercase_ ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : Optional[int] = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase__ : List[Any] = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase__ : int = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase__ : List[str] = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = self.size['''shortest_edge'''] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self ) @property def lowercase_ ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) 
self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''do_rescale''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' pass def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, 
expected_width) , ) # Test batched UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : str = json.loads(f.read() ) UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target} # encode them UpperCAmelCase__ : Optional[int] = DetaImageProcessor() UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : str = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) ) @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : int = json.loads(f.read() ) UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' ) UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Union[str, Any] = 
torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify masks UpperCAmelCase__ : Dict = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A ) # verify orig_size UpperCAmelCase__ : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
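The get_expected_values helper in the sample above implements a shortest-edge resize rule; a standalone sketch of just that rule (the function name is ours, the default of 18 matches the test config, and the longest-edge cap is left out for brevity):

def expected_size(height: int, width: int, shortest_edge: int = 18) -> tuple[int, int]:
    # Scale so the shorter side equals shortest_edge, preserving aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(expected_size(400, 300))  # (24, 18): width is the shorter side, so it becomes 18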
299
0
"""simple docstring""" from __future__ import annotations def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,): SCREAMING_SNAKE_CASE__ : Any = len(_snake_case ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(_snake_case ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] ,[*diagonal_right_collisions, row - col] ,[*diagonal_left_collisions, row + col] ,_snake_case ,_snake_case ,) def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : list[list[str]] = [] depth_first_search([] ,[] ,[] ,_snake_case ,_snake_case ) # Print all the boards for board in boards: for column in board: print(_snake_case ) print("""""" ) print(len(_snake_case ) ,"""solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
25
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""") class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f: f.write(json.dumps({} ) ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' try: AutoConfig.register("""custom""" ,lowerCamelCase__ ) # Wrong model type will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""model""" ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""bert""" ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "new-model" try: AutoConfig.register("""new-model""" ,lowerCamelCase__ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
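The register-and-round-trip flow exercised above boils down to a few lines; a minimal sketch (this CustomConfig is our stand-in, not the test fixture, and the output directory name is arbitrary):

from transformers import AutoConfig, PretrainedConfig

class CustomConfig(PretrainedConfig):
    model_type = "custom"  # must match the key passed to AutoConfig.register

AutoConfig.register("custom", CustomConfig)

config = CustomConfig()
config.save_pretrained("my-custom-model")  # writes config.json with "model_type": "custom"
reloaded = AutoConfig.from_pretrained("my-custom-model")
assert isinstance(reloaded, CustomConfig)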
296
0
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Predict the user total with ordinary least squares (normal equations)."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    # beta = [bias, date coefficient, match coefficient]
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Predict the user total with SARIMAX, using matches as the exogenous input."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Predict the user total with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote on whether the actual value is consistent with the forecasts."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data (pass the scalar, not the one-element list)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
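linear_regression_prediction above solves ordinary least squares through the normal equations, beta = (X^T X)^{-1} X^T y. A tiny numeric check of that identity (data invented for illustration):

import numpy as np

x = np.array([[1.0, 1, 2], [1, 2, 3], [1, 3, 5], [1, 4, 4]])  # columns: bias, date, matches
y = np.array([3.0, 5.0, 8.0, 9.0])
beta = np.linalg.inv(x.T @ x) @ x.T @ y  # normal-equation solution
print(beta)      # coefficients for [bias, date, matches]
print(x @ beta)  # fitted values; close to y when the data is near-linear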
164
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a
    # max_shard_size of 16 means that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)
    # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
164
1
def multiplicative_persistence(num: int) -> int:
    """Return the number of steps needed to reduce num to one digit by multiplying its digits."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the number of steps needed to reduce num to one digit by summing its digits."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
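Worked examples for both functions above (the classic values):

# 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4: three multiplicative steps.
assert multiplicative_persistence(39) == 3
# 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1: three additive steps.
assert additive_persistence(199) == 3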
323
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
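read_in_q_k_v above slices a fused (3*dim, dim) in-projection matrix into query, key and value blocks; a shape-only sketch of that split (dim chosen arbitrarily):

import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] as stored in the original checkpoint

query = in_proj_weight[:dim, :]
key = in_proj_weight[dim : dim * 2, :]
value = in_proj_weight[-dim:, :]

# Concatenating the slices in order reproduces the fused matrix exactly.
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)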
323
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : List[str], *lowerCamelCase : Optional[Any], **lowerCamelCase : Union[str, Any] )-> List[str]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> List[str]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : str, *lowerCamelCase : Any, **lowerCamelCase : List[Any] )-> int: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[Any], *lowerCamelCase : Dict, **lowerCamelCase : List[str] )-> Tuple: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Any, *lowerCamelCase : Tuple, **lowerCamelCase : int )-> Dict: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Union[str, Any], *lowerCamelCase : Any, **lowerCamelCase : Optional[int] )-> Tuple: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : str, *lowerCamelCase : Dict, **lowerCamelCase : List[Any] )-> Optional[Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Tuple, *lowerCamelCase : str, **lowerCamelCase : str )-> Union[str, Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[Any], *lowerCamelCase : int, **lowerCamelCase : Optional[int] )-> Union[str, Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : int, *lowerCamelCase : Optional[int], **lowerCamelCase : Dict )-> List[Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Tuple, *lowerCamelCase : str, **lowerCamelCase : Any )-> Union[str, Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[Any], *lowerCamelCase : List[Any], **lowerCamelCase : Dict )-> Any: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : int )-> List[str]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Tuple, *lowerCamelCase : Optional[Any], 
**lowerCamelCase : int )-> Tuple: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[int], *lowerCamelCase : List[str], **lowerCamelCase : Optional[Any] )-> Optional[int]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : int, *lowerCamelCase : Dict, **lowerCamelCase : str )-> List[Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[Any], *lowerCamelCase : Optional[int], **lowerCamelCase : List[Any] )-> List[str]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Tuple, *lowerCamelCase : int, **lowerCamelCase : Dict )-> Dict: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : str, *lowerCamelCase : Optional[Any], **lowerCamelCase : Union[str, Any] )-> Dict: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Union[str, Any], *lowerCamelCase : List[Any], **lowerCamelCase : Optional[Any] )-> str: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : List[str], *lowerCamelCase : List[str], **lowerCamelCase : Optional[int] )-> str: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : int, *lowerCamelCase : Optional[Any], **lowerCamelCase : Optional[int] )-> Optional[int]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[Any], *lowerCamelCase : str, **lowerCamelCase : int )-> List[str]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : List[str], *lowerCamelCase : Dict, **lowerCamelCase : Optional[Any] )-> List[str]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : int, *lowerCamelCase : Optional[int], **lowerCamelCase : int )-> List[Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : List[str], *lowerCamelCase : Any, **lowerCamelCase : List[Any] )-> Optional[Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[Any], *lowerCamelCase : Optional[int], **lowerCamelCase : str )-> Any: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self 
: int, *lowerCamelCase : str, **lowerCamelCase : str )-> Optional[Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : int, *lowerCamelCase : List[str], **lowerCamelCase : Optional[Any] )-> Union[str, Any]: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : Optional[int], *lowerCamelCase : Any, **lowerCamelCase : List[Any] )-> str: requires_backends(self, ['''sentencepiece'''] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ): '''simple docstring''' _a = ['sentencepiece'] def __init__( self : str, *lowerCamelCase : Optional[Any], **lowerCamelCase : str )-> Union[str, Any]: requires_backends(self, ['''sentencepiece'''] )
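Every class in the sample above follows the same dummy-object pattern: the placeholder exists only so that importing it succeeds, while actually using it raises a helpful error via requires_backends. A self-contained sketch of that guard (the names are ours, not the transformers internals):

class RequiresSentencePiece:
    # Import-time placeholder that fails loudly only when actually used.
    def __init__(self, *args, **kwargs):
        try:
            import sentencepiece  # noqa: F401
        except ImportError as exc:
            raise ImportError(
                f"{type(self).__name__} requires sentencepiece: pip install sentencepiece"
            ) from exc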
361
"""simple docstring""" from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ): '''simple docstring''' _a = ['torch', 'torchsde'] def __init__( self : Union[str, Any], *lowerCamelCase : str, **lowerCamelCase : int )-> Tuple: requires_backends(self, ['''torch''', '''torchsde'''] ) @classmethod def snake_case ( cls : List[str], *lowerCamelCase : Optional[Any], **lowerCamelCase : Dict )-> str: requires_backends(cls, ['''torch''', '''torchsde'''] ) @classmethod def snake_case ( cls : Tuple, *lowerCamelCase : Dict, **lowerCamelCase : Tuple )-> List[str]: requires_backends(cls, ['''torch''', '''torchsde'''] )
272
0
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa __lowerCAmelCase = logging.getLogger(__name__) class __a ( __UpperCamelCase ): __lowercase : Union[str, Any] = 'summarization' __lowercase : str = ['loss'] __lowercase : List[str] = ROUGE_KEYS __lowercase : Optional[int] = 'rouge2' def __init__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict: '''simple docstring''' if hparams.sortish_sampler and hparams.gpus > 1: lowercase__: Optional[int] = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' ) if hparams.sortish_sampler: raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' ) super().__init__(lowerCAmelCase__ , num_labels=lowerCAmelCase__ , mode=self.mode , **lowerCAmelCase__ ) use_task_specific_params(self.model , 'summarization' ) save_git_info(self.hparams.output_dir ) lowercase__: Dict = Path(self.output_dir ) / 'metrics.json' lowercase__: int = Path(self.output_dir ) / 'hparams.pkl' pickle_save(self.hparams , self.hparams_save_path ) lowercase__: Optional[int] = 0 lowercase__: Optional[Any] = defaultdict(lowerCAmelCase__ ) lowercase__: Union[str, Any] = self.config.model_type lowercase__: List[str] = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size lowercase__: dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } lowercase__: List[str] = { 'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test, } lowercase__: List[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} lowercase__: Optional[int] = { 'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}' assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) lowercase__: Optional[int] = get_git_info()['repo_sha'] lowercase__: Tuple = hparams.num_workers lowercase__: List[str] = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCAmelCase__ ): lowercase__: Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang] lowercase__: 
Optional[Any] = self.decoder_start_token_id lowercase__: Union[str, Any] = ( SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset ) lowercase__: Any = False lowercase__: int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: lowercase__: List[Any] = self.hparams.eval_max_gen_length else: lowercase__: Optional[Any] = self.model.config.max_length lowercase__: Dict = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict[str, List[str]]: '''simple docstring''' lowercase__: List[Any] = { k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items() } save_json(lowerCAmelCase__ , Path(self.output_dir ) / 'text_batch.json' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' ) lowercase__: List[Any] = True return readable_batch def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' return self.model(lowerCAmelCase__ , **lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict: '''simple docstring''' lowercase__: str = self.tokenizer.batch_decode( lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) return lmap(str.strip , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' lowercase__: Union[str, Any] = self.tokenizer.pad_token_id lowercase__ , lowercase__: Tuple = batch['input_ids'], batch['attention_mask'] lowercase__: int = batch['labels'] if isinstance(self.model , lowerCAmelCase__ ): lowercase__: Tuple = self.model._shift_right(lowerCAmelCase__ ) else: lowercase__: str = shift_tokens_right(lowerCAmelCase__ , lowerCAmelCase__ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero lowercase__: Dict = decoder_input_ids self.save_readable_batch(lowerCAmelCase__ ) lowercase__: Optional[int] = self(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) lowercase__: Optional[int] = outputs['logits'] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id lowercase__: Dict = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase__ ) assert lm_logits.shape[-1] == self.vocab_size lowercase__: Optional[Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: lowercase__: str = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 ) lowercase__ , lowercase__: Optional[Any] = label_smoothed_nll_loss( lowerCAmelCase__ , lowerCAmelCase__ , self.hparams.label_smoothing , ignore_index=lowerCAmelCase__ ) return (loss,) @property def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' return self.tokenizer.pad_token_id def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: '''simple docstring''' lowercase__: Dict = self._step(lowerCAmelCase__ ) lowercase__: Optional[Any] = dict(zip(self.loss_names , lowerCAmelCase__ ) ) # tokens per batch lowercase__: Optional[Any] = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum() lowercase__: int = batch['input_ids'].shape[0] lowercase__: int = batch['input_ids'].eq(self.pad ).sum() lowercase__: List[Any] = 
batch['input_ids'].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: '''simple docstring''' return self._generative_step(lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__="val" ) -> Dict: '''simple docstring''' self.step_count += 1 lowercase__: Optional[int] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} lowercase__: List[Any] = losses['loss'] lowercase__: Any = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len'] } lowercase__: Any = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) lowercase__: torch.FloatTensor = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowerCAmelCase__ ) lowercase__: Dict = {F'{prefix}_avg_{k}': x for k, x in losses.items()} lowercase__: Tuple = self.step_count self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path lowercase__: Optional[int] = flatten_list([x['preds'] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'{prefix}_loss': loss, F'{prefix}_{self.val_metric}': metric_tensor, } def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: '''simple docstring''' return calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> dict: '''simple docstring''' lowercase__: Dict = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') lowercase__: str = self.model.generate( batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowerCAmelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) lowercase__: int = (time.time() - ta) / batch['input_ids'].shape[0] lowercase__: List[str] = self.ids_to_clean_text(lowerCAmelCase__ ) lowercase__: List[str] = self.ids_to_clean_text(batch['labels'] ) lowercase__: Any = self._step(lowerCAmelCase__ ) lowercase__: Any = dict(zip(self.loss_names , lowerCAmelCase__ ) ) lowercase__: Dict = self.calc_generative_metrics(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: Optional[Any] = np.mean(lmap(lowerCAmelCase__ , lowerCAmelCase__ ) ) base_metrics.update(gen_time=lowerCAmelCase__ , gen_len=lowerCAmelCase__ , preds=lowerCAmelCase__ , target=lowerCAmelCase__ , **lowerCAmelCase__ ) return base_metrics def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' return self._generative_step(lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' return self.validation_epoch_end(lowerCAmelCase__ , prefix='test' ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> SeqaSeqDataset: '''simple docstring''' lowercase__: List[str] = self.n_obs[type_path] lowercase__: List[str] = self.target_lens[type_path] lowercase__: int = self.dataset_class( self.tokenizer , type_path=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , max_target_length=lowerCAmelCase__ , **self.dataset_kwargs , ) return dataset def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> DataLoader: '''simple 
docstring''' lowercase__: str = self.get_dataset(lowerCAmelCase__ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": lowercase__: Optional[Any] = dataset.make_sortish_sampler(lowerCAmelCase__ , distributed=self.hparams.gpus > 1 ) return DataLoader( lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": lowercase__: List[str] = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowerCAmelCase__ , batch_sampler=lowerCAmelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , ) def SCREAMING_SNAKE_CASE__ ( self ) -> DataLoader: '''simple docstring''' lowercase__: Optional[int] = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowerCAmelCase__ ) return dataloader def SCREAMING_SNAKE_CASE__ ( self ) -> DataLoader: '''simple docstring''' return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size ) def SCREAMING_SNAKE_CASE__ ( self ) -> DataLoader: '''simple docstring''' return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size ) @staticmethod def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ ) add_generic_args(lowerCAmelCase__ , lowerCAmelCase__ ) parser.add_argument( '--max_source_length' , default=1_024 , type=lowerCAmelCase__ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--max_target_length' , default=56 , type=lowerCAmelCase__ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--val_max_target_length' , default=142 , type=lowerCAmelCase__ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--test_max_target_length' , default=142 , type=lowerCAmelCase__ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument('--freeze_encoder' , action='store_true' ) parser.add_argument('--freeze_embeds' , action='store_true' ) parser.add_argument('--sortish_sampler' , action='store_true' , default=lowerCAmelCase__ ) parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowerCAmelCase__ ) parser.add_argument('--max_tokens_per_batch' , type=lowerCAmelCase__ , default=lowerCAmelCase__ ) parser.add_argument('--logger_name' , type=lowerCAmelCase__ , choices=['default', 'wandb', 'wandb_shared'] , default='default' ) parser.add_argument('--n_train' , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help='# examples. -1 means use all.' ) parser.add_argument('--n_val' , type=lowerCAmelCase__ , default=500 , required=lowerCAmelCase__ , help='# examples. 
-1 means use all.' ) parser.add_argument('--n_test' , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help='# examples. -1 means use all.' ) parser.add_argument( '--task' , type=lowerCAmelCase__ , default='summarization' , required=lowerCAmelCase__ , help='# examples. -1 means use all.' ) parser.add_argument('--label_smoothing' , type=lowerCAmelCase__ , default=0.0 , required=lowerCAmelCase__ ) parser.add_argument('--src_lang' , type=lowerCAmelCase__ , default='' , required=lowerCAmelCase__ ) parser.add_argument('--tgt_lang' , type=lowerCAmelCase__ , default='' , required=lowerCAmelCase__ ) parser.add_argument('--eval_beams' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ ) parser.add_argument( '--val_metric' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , choices=['bleu', 'rouge2', 'loss', None] ) parser.add_argument('--eval_max_gen_length' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='never generate more than n tokens' ) parser.add_argument('--save_top_k' , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help='How many checkpoints to save' ) parser.add_argument( '--early_stopping_patience' , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help=( '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So' ' val_check_interval will effect it.' ) , ) return parser class __a ( __UpperCamelCase ): __lowercase : Any = 'translation' __lowercase : str = ['loss'] __lowercase : Optional[Any] = ['bleu'] __lowercase : Optional[int] = 'bleu' def __init__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' super().__init__(lowerCAmelCase__ , **lowerCAmelCase__ ) lowercase__: Dict = hparams.src_lang lowercase__: List[Any] = hparams.tgt_lang def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> dict: '''simple docstring''' return calculate_bleu(lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case_ ( snake_case , snake_case=None ) -> SummarizationModule: Path(args.output_dir ).mkdir(exist_ok=snake_case ) check_output_dir(snake_case , expected_items=3 ) if model is None: if "summarization" in args.task: lowercase__: SummarizationModule = SummarizationModule(snake_case ) else: lowercase__: SummarizationModule = TranslationModule(snake_case ) lowercase__: Any = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('/tmp' ) or str(args.output_dir ).startswith('/var' ) ): lowercase__: int = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger lowercase__: Union[str, Any] = os.environ.get('WANDB_PROJECT' , snake_case ) lowercase__: Tuple = WandbLogger(name=model.output_dir.name , project=snake_case ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger lowercase__: Union[str, Any] = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' ) if args.early_stopping_patience >= 0: lowercase__: int = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: lowercase__: int = False lowercase__: int = args.val_metric == 'loss' lowercase__: pl.Trainer = generic_train( snake_case , snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , snake_case ) , early_stopping_callback=snake_case , 
logger=snake_case , ) pickle_save(model.hparams , model.output_dir / 'hparams.pkl' ) if not args.do_predict: return model lowercase__: Union[str, Any] = '' lowercase__: Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=snake_case ) ) if checkpoints: lowercase__: Optional[Any] = checkpoints[-1] lowercase__: Any = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() __lowerCAmelCase = pl.Trainer.add_argparse_args(parser) __lowerCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd()) __lowerCAmelCase = parser.parse_args() main(args)
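# For reference, a minimal sketch of the label-smoothed loss that `_step` above
# delegates to via `label_smoothed_nll_loss`. This assumes the standard
# fairseq-style formulation; the real helper lives in `utils` and may differ in
# detail. Callers pass the tokenizer's pad id as `ignore_index`, so `gather`
# always sees valid indices.
import torch


def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index):
    """Blend the gold-token NLL with a uniform smoothing term over the vocabulary."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)   # per-token gold NLL
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)   # per-token sum over vocab
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    loss = (1.0 - epsilon) * nll_loss + (epsilon / lprobs.size(-1)) * smooth_loss
    return loss, nll_loss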
196
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
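# A minimal sketch of the deferred-import pattern `_LazyModule` implements above
# (a hypothetical standalone version, not transformers' actual class): attribute
# access triggers the submodule import, so importing the package stays cheap even
# when torch-backed modules are registered.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [attribute, ...]} into {attribute: submodule}.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)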
196
1
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """Bundles a SpeechT5 feature extractor and a SpeechT5 tokenizer into one processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process model inputs (`audio`/`text`) and/or targets (`audio_target`/`text_target`)."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad inputs (`input_values`/`input_ids`) and/or targets (`labels`) to a common length."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # The feature extractor pads log-mel targets, so temporarily report
                # `num_mel_bins` as its feature size.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
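# A brief usage sketch for the processor above. The checkpoint name and the
# 16 kHz rate are illustrative assumptions, and `waveform` stands in for a 1-D
# float array of audio samples:
#
#     from transformers import SpeechT5Processor
#
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
#     # ASR direction: waveform in, tokenized transcription out as labels.
#     batch = processor(audio=waveform, text_target="hello world", sampling_rate=16000)
#     # `batch` carries `input_values` and `labels`, plus `decoder_attention_mask`
#     # when the tokenizer returns one.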
358
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """Build an undirected 9-node graph and check Prim's MST edges."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # MST edges may come back in either orientation, so accept (u, v) or (v, u).
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
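# For reference, a minimal heap-based Prim's sketch over the same adjacency
# format ({node: [[neighbour, cost], ...]}). This illustrates the algorithm the
# test exercises; it is not the `prisms_algorithm` implementation itself.
import heapq


def prim_mst_sketch(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, neighbour) for neighbour, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap and len(visited) < len(adjacency):
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # stale entry: endpoint already joined the tree
        visited.add(v)
        mst_edges.append((u, v, cost))
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return mst_edges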
295
0
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig snake_case__ : Tuple = logging.get_logger(__name__) # General docstring snake_case__ : Tuple = '''MobileNetV1Config''' # Base docstring snake_case__ : Optional[int] = '''google/mobilenet_v1_1.0_224''' snake_case__ : List[str] = [1, 1_024, 7, 7] # Image classification docstring snake_case__ : str = '''google/mobilenet_v1_1.0_224''' snake_case__ : Union[str, Any] = '''tabby, tabby cat''' snake_case__ : Optional[int] = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _snake_case ( _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : int=None ): lowerCAmelCase : List[str] = {} if isinstance(__lowercase , __lowercase ): lowerCAmelCase : Optional[Any] = model.mobilenet_va else: lowerCAmelCase : Optional[int] = model lowerCAmelCase : Any = '''MobilenetV1/Conv2d_0/''' lowerCAmelCase : int = backbone.conv_stem.convolution.weight lowerCAmelCase : Dict = backbone.conv_stem.normalization.bias lowerCAmelCase : str = backbone.conv_stem.normalization.weight lowerCAmelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean lowerCAmelCase : str = backbone.conv_stem.normalization.running_var for i in range(13 ): lowerCAmelCase : Optional[Any] = i + 1 lowerCAmelCase : str = i * 2 lowerCAmelCase : Dict = backbone.layer[pt_index] lowerCAmelCase : List[str] = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' lowerCAmelCase : str = pointer.convolution.weight lowerCAmelCase : Optional[Any] = pointer.normalization.bias lowerCAmelCase : Union[str, Any] = pointer.normalization.weight lowerCAmelCase : int = pointer.normalization.running_mean lowerCAmelCase : Union[str, Any] = pointer.normalization.running_var lowerCAmelCase : int = backbone.layer[pt_index + 1] lowerCAmelCase : List[str] = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' lowerCAmelCase : Union[str, Any] = pointer.convolution.weight lowerCAmelCase : int = pointer.normalization.bias lowerCAmelCase : Any = pointer.normalization.weight lowerCAmelCase : Optional[Any] = pointer.normalization.running_mean lowerCAmelCase : Union[str, Any] = pointer.normalization.running_var if isinstance(__lowercase , __lowercase ): lowerCAmelCase : Tuple = '''MobilenetV1/Logits/Conv2d_1c_1x1/''' lowerCAmelCase : str = model.classifier.weight lowerCAmelCase : Optional[int] = model.classifier.bias return tf_to_pt_map def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Union[str, Any] ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see ''' '''https://www.tensorflow.org/install/ for installation instructions.''' ) raise # Load weights from TF model lowerCAmelCase : int = tf.train.list_variables(__lowercase ) lowerCAmelCase : Tuple = {} for name, shape in init_vars: logger.info(f'''Loading TF weight {name} with shape {shape}''' ) lowerCAmelCase : Union[str, Any] = tf.train.load_variable(__lowercase , __lowercase ) lowerCAmelCase : Optional[int] = array # Build TF to PyTorch weights loading map lowerCAmelCase : Tuple = _build_tf_to_pytorch_map(__lowercase , __lowercase , __lowercase ) for name, pointer in tf_to_pt_map.items(): logger.info(f'''Importing {name}''' ) if name not in tf_weights: logger.info(f'''{name} not in tf pre-trained weights, skipping''' ) continue lowerCAmelCase : Dict = tf_weights[name] if "depthwise_weights" in name: logger.info('''Transposing depthwise''' ) lowerCAmelCase : List[str] = np.transpose(__lowercase , (2, 3, 0, 1) ) elif "weights" in name: logger.info('''Transposing''' ) if len(pointer.shape ) == 2: # copying into linear layer lowerCAmelCase : Dict = array.squeeze().transpose() else: lowerCAmelCase : int = np.transpose(__lowercase , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' ) lowerCAmelCase : List[str] = torch.from_numpy(__lowercase ) tf_weights.pop(__lowercase , __lowercase ) tf_weights.pop(name + '''/RMSProp''' , __lowercase ) tf_weights.pop(name + '''/RMSProp_1''' , __lowercase ) tf_weights.pop(name + '''/ExponentialMovingAverage''' , __lowercase ) logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' ) return model def _snake_case ( _snake_case : Any , _snake_case : Any ): lowerCAmelCase, lowerCAmelCase : Optional[int] = features.shape[-2:] lowerCAmelCase, lowerCAmelCase : List[Any] = conv_layer.stride lowerCAmelCase, lowerCAmelCase : Optional[Any] = conv_layer.kernel_size if in_height % stride_height == 0: lowerCAmelCase : Union[str, Any] = max(kernel_height - stride_height , 0 ) else: lowerCAmelCase : Optional[int] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: lowerCAmelCase : Dict = max(kernel_width - stride_width , 0 ) else: lowerCAmelCase : str = max(kernel_width - (in_width % stride_width) , 0 ) lowerCAmelCase : str = pad_along_width // 2 lowerCAmelCase : List[str] = pad_along_width - pad_left lowerCAmelCase : Union[str, Any] = pad_along_height // 2 lowerCAmelCase : int = pad_along_height - pad_top lowerCAmelCase : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__lowercase , __lowercase , '''constant''' , 0.0 ) class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : MobileNetVaConfig , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[bool] = True , UpperCamelCase_ : Optional[bool or str] = True , ): super().__init__() lowerCAmelCase : Dict = config if in_channels % groups != 0: raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) lowerCAmelCase : int = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) lowerCAmelCase : List[Any] = nn.Convad( 
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=__UpperCAmelCase , groups=__UpperCAmelCase , bias=__UpperCAmelCase , padding_mode='''zeros''' , ) if use_normalization: lowerCAmelCase : List[str] = nn.BatchNormad( num_features=__UpperCAmelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=__UpperCAmelCase , track_running_stats=__UpperCAmelCase , ) else: lowerCAmelCase : Union[str, Any] = None if use_activation: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase : List[Any] = ACTaFN[use_activation] elif isinstance(config.hidden_act , __UpperCAmelCase ): lowerCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act] else: lowerCAmelCase : Dict = config.hidden_act else: lowerCAmelCase : Tuple = None def lowerCamelCase__ ( self : str , UpperCamelCase_ : torch.Tensor ): if self.config.tf_padding: lowerCAmelCase : Dict = apply_tf_padding(__UpperCAmelCase , self.convolution ) lowerCAmelCase : Any = self.convolution(__UpperCAmelCase ) if self.normalization is not None: lowerCAmelCase : Optional[Any] = self.normalization(__UpperCAmelCase ) if self.activation is not None: lowerCAmelCase : str = self.activation(__UpperCAmelCase ) return features class snake_case_( snake_case_ ): __UpperCamelCase = MobileNetVaConfig __UpperCamelCase = load_tf_weights_in_mobilenet_va __UpperCamelCase = '''mobilenet_v1''' __UpperCamelCase = '''pixel_values''' __UpperCamelCase = False def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Union[nn.Linear, nn.Convad] ): if isinstance(__UpperCAmelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__UpperCAmelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) snake_case__ : int = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' snake_case__ : List[Any] = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , snake_case_ , ) class snake_case_( snake_case_ ): def __init__( self : List[Any] , UpperCamelCase_ : MobileNetVaConfig , UpperCamelCase_ : bool = True ): super().__init__(__UpperCAmelCase ) lowerCAmelCase : Union[str, Any] = config lowerCAmelCase : Dict = 3_2 lowerCAmelCase : str = max(int(depth * config.depth_multiplier ) , config.min_depth ) lowerCAmelCase : Tuple = MobileNetVaConvLayer( __UpperCAmelCase , in_channels=config.num_channels , out_channels=__UpperCAmelCase , kernel_size=3 , stride=2 , ) lowerCAmelCase : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] lowerCAmelCase : Union[str, Any] = nn.ModuleList() for i in range(1_3 ): lowerCAmelCase : Tuple = out_channels if strides[i] == 2 or i == 0: depth *= 2 lowerCAmelCase : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=3 , stride=strides[i] , groups=__UpperCAmelCase , ) ) self.layer.append( MobileNetVaConvLayer( __UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=1 , ) ) lowerCAmelCase : Any = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] ): raise NotImplementedError @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) lowerCAmelCase : Optional[int] = self.conv_stem(__UpperCAmelCase ) lowerCAmelCase : Optional[int] = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): lowerCAmelCase : Union[str, Any] = layer_module(__UpperCAmelCase ) if output_hidden_states: lowerCAmelCase : Dict = all_hidden_states + (hidden_states,) lowerCAmelCase : Any = hidden_states if self.pooler is not None: lowerCAmelCase : List[str] = torch.flatten(self.pooler(__UpperCAmelCase ) , start_dim=1 ) else: lowerCAmelCase : Union[str, Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=__UpperCAmelCase , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , snake_case_ , ) class snake_case_( snake_case_ ): def __init__( self : List[str] , UpperCamelCase_ : MobileNetVaConfig ): super().__init__(__UpperCAmelCase ) lowerCAmelCase : str = config.num_labels lowerCAmelCase : str = MobileNetVaModel(__UpperCAmelCase ) lowerCAmelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head lowerCAmelCase : Union[str, Any] = nn.Dropout(config.classifier_dropout_prob , inplace=__UpperCAmelCase ) lowerCAmelCase : Any = nn.Linear(__UpperCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase : Any = self.mobilenet_va(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase ) lowerCAmelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase : Optional[int] = self.classifier(self.dropout(__UpperCAmelCase ) ) lowerCAmelCase : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCAmelCase : Tuple = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCAmelCase : List[str] = '''single_label_classification''' else: lowerCAmelCase : List[str] = '''multi_label_classification''' if self.config.problem_type == "regression": lowerCAmelCase : Dict = MSELoss() if self.num_labels == 1: lowerCAmelCase : int = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCAmelCase : int = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": lowerCAmelCase : str = CrossEntropyLoss() lowerCAmelCase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCAmelCase : str = BCEWithLogitsLoss() lowerCAmelCase : Any = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) if not return_dict: lowerCAmelCase : Any = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states , )
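# `apply_tf_padding` above mirrors TensorFlow's "SAME" padding, which can be
# asymmetric: any odd leftover pixel is padded on the bottom/right. A sketch of
# the per-axis arithmetic it applies:
def same_pad_1d_sketch(in_size, stride, kernel):
    """Return the (before, after) padding TF would use along one axis."""
    if in_size % stride == 0:
        pad = max(kernel - stride, 0)
    else:
        pad = max(kernel - (in_size % stride), 0)
    return pad // 2, pad - pad // 2


# e.g. the stride-2 3x3 stem conv on a 224-px axis pads (0, 1), matching
# TF-trained checkpoints:
assert same_pad_1d_sketch(224, 2, 3) == (0, 1)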
60
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = CycleDiffusionPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A = CLIPTextModel(__UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ): '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _A = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = 
self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.get_dummy_components() for name, module in components.items(): if hasattr(__UpperCAmelCase , "half" ): _A = module.half() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Any ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained( __UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) _A = init_image.resize((512, 512) ) _A 
= "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
79
0
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def lowerCamelCase_ ( lowerCamelCase__ ): lowerCamelCase_ = model.config lowerCamelCase_ = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 1_6, 3_2] , window_size=original_config.window_size , embed_dim=1_2_8 , ) lowerCamelCase_ = MBartConfig( is_decoder=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , add_cross_attention=lowerCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=lowerCamelCase__ , add_final_layer_norm=lowerCamelCase__ , ) return encoder_config, decoder_config def lowerCamelCase_ ( lowerCamelCase__ ): if "encoder.model" in name: lowerCamelCase_ = name.replace("encoder.model" , "encoder" ) if "decoder.model" in name: lowerCamelCase_ = name.replace("decoder.model" , "decoder" ) if "patch_embed.proj" in name: lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: lowerCamelCase_ = name.replace("patch_embed.norm" , "embeddings.norm" ) if name.startswith("encoder" ): if "layers" in name: lowerCamelCase_ = "encoder." + name if "attn.proj" in name: lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name and "mask" not in name: lowerCamelCase_ = name.replace("attn" , "attention.self" ) if "norm1" in name: lowerCamelCase_ = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowerCamelCase_ = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" ) if name == "encoder.norm.weight": lowerCamelCase_ = "encoder.layernorm.weight" if name == "encoder.norm.bias": lowerCamelCase_ = "encoder.layernorm.bias" return name def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): for key in orig_state_dict.copy().keys(): lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ ) if "qkv" in key: lowerCamelCase_ = key.split("." 
) lowerCamelCase_ = int(key_split[3] ) lowerCamelCase_ = int(key_split[5] ) lowerCamelCase_ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCamelCase_ = val[:dim, :] lowerCamelCase_ = val[dim : dim * 2, :] lowerCamelCase_ = val[-dim:, :] else: lowerCamelCase_ = val[:dim] lowerCamelCase_ = val[dim : dim * 2] lowerCamelCase_ = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: lowerCamelCase_ = val return orig_state_dict def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False ): # load original model lowerCamelCase_ = DonutModel.from_pretrained(lowerCamelCase__ ).eval() # load HuggingFace model lowerCamelCase_ , lowerCamelCase_ = get_configs(lowerCamelCase__ ) lowerCamelCase_ = DonutSwinModel(lowerCamelCase__ ) lowerCamelCase_ = MBartForCausalLM(lowerCamelCase__ ) lowerCamelCase_ = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) model.eval() lowerCamelCase_ = original_model.state_dict() lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) # verify results on scanned document lowerCamelCase_ = load_dataset("hf-internal-testing/example-documents" ) lowerCamelCase_ = dataset["test"][0]["image"].convert("RGB" ) lowerCamelCase_ = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase__ , from_slow=lowerCamelCase__ ) lowerCamelCase_ = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) lowerCamelCase_ = DonutProcessor(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ = processor(lowerCamelCase__ , return_tensors="pt" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": lowerCamelCase_ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" lowerCamelCase_ = "When is the coffee break?" 
lowerCamelCase_ = task_prompt.replace("{user_input}" , lowerCamelCase__ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": lowerCamelCase_ = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: lowerCamelCase_ = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": lowerCamelCase_ = "s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": lowerCamelCase_ = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt lowerCamelCase_ = "hello world" else: raise ValueError("Model name not supported" ) lowerCamelCase_ = original_model.decoder.tokenizer(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors="pt" )[ "input_ids" ] lowerCamelCase_ = original_model.encoder.model.patch_embed(lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ = model.encoder.embeddings(lowerCamelCase__ ) assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) # verify encoder hidden states lowerCamelCase_ = original_model.encoder(lowerCamelCase__ ) lowerCamelCase_ = model.encoder(lowerCamelCase__ ).last_hidden_state assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-2 ) # verify decoder hidden states lowerCamelCase_ = original_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).logits lowerCamelCase_ = model(lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ).logits assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" ) processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" ) if __name__ == "__main__": __A =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''naver-clova-ix/donut-base-finetuned-docvqa''', required=False, type=str, help='''Name of the original model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, required=False, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub.''', ) __A =parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
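# The `"qkv" in key` branch of `convert_state_dict` above slices each fused
# attention projection into separate query/key/value tensors. A toy sketch of
# that slicing (the 12x4 shape is illustrative):
import torch


def split_fused_qkv_sketch(fused_weight, dim):
    """Split a (3*dim, dim) fused qkv weight into three (dim, dim) matrices."""
    query = fused_weight[:dim, :]
    key = fused_weight[dim : dim * 2, :]
    value = fused_weight[-dim:, :]
    return query, key, value


q, k, v = split_fused_qkv_sketch(torch.randn(12, 4), dim=4)
assert q.shape == k.shape == v.shape == (4, 4)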
47
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE : def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Optional[int]: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_( self ) -> Tuple: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]: lowerCamelCase_ = BioGptModel(config=lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , attention_mask=lowercase ) lowerCamelCase_ = 
model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any: lowerCamelCase_ = BioGptForCausalLM(config=lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Tuple: lowerCamelCase_ = BioGptModel(config=lowercase ) model.to(lowercase ) model.eval() # create attention mask lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase ) lowerCamelCase_ = self.seq_length // 2 lowerCamelCase_ = 0 # first forward pass lowerCamelCase_ , lowerCamelCase_ = model(lowercase , attention_mask=lowercase ).to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids lowerCamelCase_ = ids_tensor((1,) , lowercase ).item() + 1 lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) lowerCamelCase_ = random_other_next_tokens # append to next input_ids and attn_mask lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase_ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase )] , dim=1 , ) # get two different outputs lowerCamelCase_ = model(lowercase , attention_mask=lowercase )["last_hidden_state"] lowerCamelCase_ = model(lowercase , past_key_values=lowercase , attention_mask=lowercase )["last_hidden_state"] # select random slice lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach() lowerCamelCase_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict: lowerCamelCase_ = BioGptModel(config=lowercase ).to(lowercase ).eval() lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase ) # first forward pass lowerCamelCase_ = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase_ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) lowerCamelCase_ = model(lowercase , attention_mask=lowercase )["last_hidden_state"] lowerCamelCase_ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[ "last_hidden_state" ] # select random slice lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # 
test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ) -> Optional[int]: lowerCamelCase_ = BioGptForCausalLM(lowercase ) model.to(lowercase ) if gradient_checkpointing: model.gradient_checkpointing_enable() lowerCamelCase_ = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def SCREAMING_SNAKE_CASE_( self , lowercase , *lowercase ) -> List[Any]: lowerCamelCase_ = BioGptModel(lowercase ) lowerCamelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Any: lowerCamelCase_ = self.num_labels lowerCamelCase_ = BioGptForTokenClassification(lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): lowerCAmelCase__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) lowerCAmelCase__ = (BioGptForCausalLM,) if is_torch_available() else () lowerCAmelCase__ = ( { 'feature-extraction': BioGptModel, 'text-classification': BioGptForSequenceClassification, 'text-generation': BioGptForCausalLM, 'token-classification': BioGptForTokenClassification, 'zero-shot': BioGptForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = False def SCREAMING_SNAKE_CASE_( self ) -> Any: lowerCamelCase_ = BioGptModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase_ = type self.model_tester.create_and_check_model(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_forward_and_backwards(*lowercase , gradient_checkpointing=lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> int: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase ) @slow def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(lowercase ) lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) lowerCamelCase_ = "left" # Define PAD Token = EOS Token = 50256 lowerCamelCase_ = tokenizer.eos_token lowerCamelCase_ = model.config.eos_token_id # use different length sentences to test batching lowerCamelCase_ = [ "Hello, my dog is a little", "Today, I", ] lowerCamelCase_ = tokenizer(lowercase , return_tensors="pt" , padding=lowercase ) lowerCamelCase_ = inputs["input_ids"].to(lowercase ) lowerCamelCase_ = model.generate( input_ids=lowercase , attention_mask=inputs["attention_mask"].to(lowercase ) , ) lowerCamelCase_ = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(lowercase ) lowerCamelCase_ = model.generate(input_ids=lowercase ) lowerCamelCase_ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() lowerCamelCase_ = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(lowercase ) lowerCamelCase_ = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) lowerCamelCase_ = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) lowerCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) lowerCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) lowerCamelCase_ = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) @slow def SCREAMING_SNAKE_CASE_( self ) -> int: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = BioGptModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Tuple: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = input_dict["input_ids"] lowerCamelCase_ = input_ids.ne(1 ).to(lowercase ) lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCamelCase_ = BioGptForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , attention_mask=lowercase , labels=lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = "multi_label_classification" lowerCamelCase_ = input_dict["input_ids"] lowerCamelCase_ = input_ids.ne(1 ).to(lowercase ) lowerCamelCase_ = ids_tensor( [self.model_tester.batch_size, 
config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCamelCase_ = BioGptForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , attention_mask=lowercase , labels=lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) lowerCamelCase_ = torch.tensor([[2, 4805, 9, 656, 21]] ) lowerCamelCase_ = model(lowercase )[0] lowerCamelCase_ = 42384 lowerCamelCase_ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , lowercase ) lowerCamelCase_ = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(lowercase ) torch.manual_seed(0 ) lowerCamelCase_ = tokenizer("COVID-19 is" , return_tensors="pt" ).to(lowercase ) lowerCamelCase_ = model.generate( **lowercase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowercase , ) lowerCamelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase ) lowerCamelCase_ = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(lowercase , lowercase )
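# A minimal, self-contained sketch of the shape check the tester class above
# performs, assuming `torch` and a `transformers` release with BioGPT support
# are installed. The tiny config values below are illustrative, not taken
# from the test file.
import torch
from transformers import BioGptConfig, BioGptModel

config = BioGptConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = BioGptModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch, seq_len)
with torch.no_grad():
    out = model(input_ids)
assert out.last_hidden_state.shape == (2, 7, config.hidden_size)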
47
1
import requests


def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
    """simple docstring"""
    A : List[str] = {"""Content-Type""": """application/json"""}
    A : List[str] = requests.post(_lowerCAmelCase , json={"""text""": message_body} , headers=_lowerCAmelCase )
    if response.status_code != 200:
        A : Optional[int] = (
            """Request to slack returned an error """
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(_lowerCAmelCase )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
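# A hedged variation on the helper above: the same incoming-webhook POST, but
# with a request timeout and `raise_for_status()` instead of a manual status
# check. The function name is new here and the URL is a placeholder.
import requests

def send_slack_message_safe(message_body: str, slack_url: str) -> None:
    response = requests.post(slack_url, json={"text": message_body}, timeout=10)
    response.raise_for_status()  # raises requests.HTTPError on any non-2xx reply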
116
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _UpperCamelCase ( A , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = AlbertTokenizer lowerCAmelCase__ = AlbertTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = True def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __lowercase =AlbertTokenizer(_lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : Tuple): '''simple docstring''' __lowercase ='this is a test' __lowercase ='this is a test' return input_text, output_text def __lowerCamelCase ( self : int): '''simple docstring''' __lowercase ='<pad>' __lowercase =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase) , _lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase) , _lowerCAmelCase) def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __lowercase =list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<pad>') self.assertEqual(vocab_keys[1] , '<unk>') self.assertEqual(vocab_keys[-1] , '▁eloquent') self.assertEqual(len(_lowerCAmelCase) , 3_0_0_0_0) def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0) def __lowerCamelCase ( self : int): '''simple docstring''' if not self.test_rust_tokenizer: return __lowercase =self.get_tokenizer() __lowercase =self.get_rust_tokenizer() __lowercase ='I was born in 92000, and this is falsé.' 
__lowercase =tokenizer.tokenize(_lowerCAmelCase) __lowercase =rust_tokenizer.tokenize(_lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) __lowercase =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase) __lowercase =rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) __lowercase =self.get_rust_tokenizer() __lowercase =tokenizer.encode(_lowerCAmelCase) __lowercase =rust_tokenizer.encode(_lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __lowercase =AlbertTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase) __lowercase =tokenizer.tokenize('This is a test') self.assertListEqual(_lowerCAmelCase , ['▁this', '▁is', '▁a', '▁test']) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase) , [4_8, 2_5, 2_1, 1_2_8_9]) __lowercase =tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( _lowerCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.']) __lowercase =tokenizer.convert_tokens_to_ids(_lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]) __lowercase =tokenizer.convert_ids_to_tokens(_lowerCAmelCase) self.assertListEqual( _lowerCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , ) def __lowerCamelCase ( self : List[Any]): '''simple docstring''' __lowercase =AlbertTokenizer(_lowerCAmelCase) __lowercase =tokenizer.encode('sequence builders') __lowercase =tokenizer.encode('multi-sequence build') __lowercase =tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase) __lowercase =tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase ={'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 
3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
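# A short usage sketch of the round trip the tests above exercise, assuming
# network access and `sentencepiece` installed; `albert-base-v2` is the same
# checkpoint the integration test references.
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tok.encode("sequence builders")  # encode() adds [CLS] ... [SEP] by default
print(ids[0] == tok.cls_token_id, ids[-1] == tok.sep_token_id)  # True True
print(tok.decode(ids, skip_special_tokens=True))  # typically 'sequence builders'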
166
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


UpperCamelCase_ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
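# A minimal sketch of the lazy-import pattern that `_LazyModule` implements
# above: attribute access triggers the real import, so the package stays cheap
# until a symbol is actually needed. This toy class and its module names are
# illustrative, not the transformers implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"ok": True}))  # json is imported only on this first access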
361
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class a_ ( _snake_case ): UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,) UpperCamelCase__ : Tuple =(("num_inference_steps", 25),) def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int: UpperCAmelCase_ = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf'''), '''variance_type''': None, } config.update(**_lowercase) return config def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]: UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config(**_lowercase) UpperCAmelCase_ = scheduler_class(**_lowercase) scheduler.set_timesteps(_lowercase) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowercase) UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase) new_scheduler.set_timesteps(_lowercase) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ , UpperCAmelCase_ = sample, sample for t in range(_lowercase , time_step + scheduler.config.solver_order + 1): UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" def __a ( self :Union[str, Any]) -> List[Any]: pass def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict: UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_lowercase) scheduler.set_timesteps(_lowercase) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowercase) UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase) # copy over dummy past residuals new_scheduler.set_timesteps(_lowercase) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample assert torch.sum(torch.abs(output 
- new_output)) < 1E-5, "Scheduler outputs are not identical" def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int: if scheduler is None: UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_lowercase) UpperCAmelCase_ = scheduler_class(**_lowercase) UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_lowercase) UpperCAmelCase_ = scheduler_class(**_lowercase) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_lowercase) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_lowercase , _lowercase) UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample return sample def __a ( self :int) -> Tuple: UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = 50 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_lowercase) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): UpperCAmelCase_ = model(_lowercase , _lowercase) UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_mean.item() - 0.2_574) < 1E-3 def __a ( self :List[Any]) -> List[Any]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_lowercase) def __a ( self :int) -> Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = self.full_loop(scheduler=_lowercase) UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_mean.item() - 0.2_791) < 1E-3 UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) UpperCAmelCase_ = self.full_loop(scheduler=_lowercase) UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_mean.item() - 0.2_791) < 1E-3 def __a ( self :Tuple) -> int: self.check_over_configs(thresholding=_lowercase) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , ) def __a ( self :List[Any]) -> Any: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowercase) def __a ( self :Any) -> Optional[int]: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , ) UpperCAmelCase_ = self.full_loop( solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , ) assert not torch.isnan(_lowercase).any(), "Samples have nan numbers" def __a ( self :Tuple) -> int: self.check_over_configs(lower_order_final=_lowercase) 
self.check_over_configs(lower_order_final=_lowercase) def __a ( self :Tuple) -> Optional[Any]: self.check_over_configs(lambda_min_clipped=-float('''inf''')) self.check_over_configs(lambda_min_clipped=-5.1) def __a ( self :Any) -> List[str]: self.check_over_configs(variance_type=_lowercase) self.check_over_configs(variance_type='''learned_range''') def __a ( self :Any) -> Dict: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_lowercase , time_step=0) def __a ( self :Dict) -> Union[str, Any]: UpperCAmelCase_ = self.full_loop() UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_mean.item() - 0.2_791) < 1E-3 def __a ( self :Any) -> Union[str, Any]: UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase) UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_mean.item() - 0.2_248) < 1E-3 def __a ( self :str) -> Optional[int]: UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''') UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_mean.item() - 0.1_453) < 1E-3 def __a ( self :List[Any]) -> Dict: UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase) UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_mean.item() - 0.0_649) < 1E-3 def __a ( self :Any) -> Optional[Any]: UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0) UpperCAmelCase_ = scheduler_class(**_lowercase) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter.half() scheduler.set_timesteps(_lowercase) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_lowercase , _lowercase) UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample assert sample.dtype == torch.floataa
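# A compact sketch of the `full_loop` pattern above, assuming `diffusers` and
# `torch` are installed. The zero tensor stands in for a real denoiser's
# epsilon prediction, so only the scheduler mechanics are demonstrated.
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder model prediction
    sample = scheduler.step(model_output, t, sample).prev_sample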
344
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : int = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[Any] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
316
"""simple docstring""" import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str: __UpperCamelCase = s.rsplit(snake_case , snake_case ) return new.join(snake_case ) def A ( snake_case :List[Any] ) -> int: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def A ( snake_case :str ) -> Union[str, Any]: __UpperCamelCase = {} __UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4'] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' ) if "res_path" in key: __UpperCamelCase = key.replace('res_path.' , 'res_path.path.' ) if key.endswith('.w' ): __UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 ) if key.endswith('.b' ): __UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 ) __UpperCamelCase = value.float() return upgrade @torch.no_grad() def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int: from dall_e import Encoder __UpperCamelCase = Encoder() if os.path.exists(snake_case ): __UpperCamelCase = torch.load(snake_case ) else: __UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case ) if isinstance(snake_case , snake_case ): __UpperCamelCase = ckpt.state_dict() encoder.load_state_dict(snake_case ) if config_path is not None: __UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case ) else: __UpperCamelCase = FlavaImageCodebookConfig() __UpperCamelCase = FlavaImageCodebook(snake_case ).eval() __UpperCamelCase = encoder.state_dict() __UpperCamelCase = upgrade_state_dict(snake_case ) hf_model.load_state_dict(snake_case ) __UpperCamelCase = hf_model.state_dict() __UpperCamelCase = count_parameters(snake_case ) __UpperCamelCase = count_parameters(snake_case ) assert torch.allclose(snake_case , snake_case , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(snake_case ) else: return hf_state_dict if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCamelCase : int = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
316
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : Dict = { """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class UpperCamelCase_ ( __magic_name__ ): lowercase = 'wav2vec2' def __init__( self , A=32 , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=0.1 , A=0.0 , A=0.0 , A=0.1 , A=0.1 , A=0.0_2 , A=1e-5 , A="group" , A="gelu" , A=(512, 512, 512, 512, 512, 512, 512) , A=(5, 2, 2, 2, 2, 2, 2) , A=(10, 3, 3, 3, 3, 2, 2) , A=False , A=128 , A=16 , A=False , A=True , A=0.0_5 , A=10 , A=2 , A=0.0 , A=10 , A=0 , A=320 , A=2 , A=0.1 , A=100 , A=256 , A=256 , A=0.1 , A="sum" , A=False , A=False , A=256 , A=(512, 512, 512, 512, 1500) , A=(5, 3, 3, 1, 1) , A=(1, 2, 3, 1, 1) , A=512 , A=0 , A=1 , A=2 , A=False , A=3 , A=2 , A=3 , A=None , A=None , **A , ) -> str: super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A ) UpperCAmelCase : Dict = hidden_size UpperCAmelCase : str = feat_extract_norm UpperCAmelCase : Any = feat_extract_activation UpperCAmelCase : List[Any] = list(A ) UpperCAmelCase : str = list(A ) UpperCAmelCase : Optional[Any] = list(A ) UpperCAmelCase : Union[str, Any] = conv_bias UpperCAmelCase : List[Any] = num_conv_pos_embeddings UpperCAmelCase : str = num_conv_pos_embedding_groups UpperCAmelCase : Optional[Any] = len(self.conv_dim ) UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : Union[str, Any] = intermediate_size UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : Tuple = num_attention_heads UpperCAmelCase : Union[str, Any] = hidden_dropout UpperCAmelCase : Tuple = attention_dropout UpperCAmelCase : Union[str, Any] = activation_dropout UpperCAmelCase : Any = feat_proj_dropout UpperCAmelCase : Tuple = final_dropout UpperCAmelCase : Tuple = layerdrop UpperCAmelCase : Any = layer_norm_eps UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Any = vocab_size UpperCAmelCase : Tuple = do_stable_layer_norm UpperCAmelCase : str = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase : List[Any] = apply_spec_augment UpperCAmelCase : List[str] = mask_time_prob UpperCAmelCase : Any = mask_time_length UpperCAmelCase : List[Any] = mask_time_min_masks UpperCAmelCase : Optional[int] = mask_feature_prob UpperCAmelCase : Union[str, Any] = mask_feature_length UpperCAmelCase : List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCAmelCase : List[str] = num_codevectors_per_group UpperCAmelCase : List[str] = num_codevector_groups UpperCAmelCase : List[str] = contrastive_logits_temperature UpperCAmelCase : Optional[int] = feat_quantizer_dropout UpperCAmelCase : List[Any] = num_negatives UpperCAmelCase : Optional[int] = codevector_dim UpperCAmelCase : int = proj_codevector_dim UpperCAmelCase : Optional[Any] = diversity_loss_weight # ctc loss UpperCAmelCase : Tuple = ctc_loss_reduction UpperCAmelCase : str = ctc_zero_infinity # adapter UpperCAmelCase : int = add_adapter UpperCAmelCase : str = adapter_kernel_size UpperCAmelCase : Tuple = adapter_stride UpperCAmelCase : Dict = num_adapter_layers UpperCAmelCase : Tuple = output_hidden_size or hidden_size UpperCAmelCase : Optional[Any] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCAmelCase : Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCAmelCase : List[str] = list(A ) UpperCAmelCase : Union[str, Any] = list(A ) UpperCAmelCase : int = list(A ) UpperCAmelCase : Any = xvector_output_dim @property def _lowercase( self ) -> Tuple: return functools.reduce(operator.mul , self.conv_stride , 1 )
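# A worked example of the property above: the number of waveform samples per
# output frame is the product of the convolutional strides, computed with
# functools.reduce. The stride tuple is the config's default.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one frame per 320 samples, i.e. 20 ms at 16 kHz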
338
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import _LazyModule


a : Optional[int] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer

else:
    import sys

    a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
338
1
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowercase ( A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = DebertaTokenizer SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Optional[int] = DebertaTokenizerFast def __magic_name__( self :List[str] ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) __SCREAMING_SNAKE_CASE : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __SCREAMING_SNAKE_CASE : Union[str, Any] = {'''unk_token''': '''[UNK]'''} __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase__ ) ) def __magic_name__( self :Any , **lowerCAmelCase__ :str ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __magic_name__( self :int , lowerCAmelCase__ :int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer''' __SCREAMING_SNAKE_CASE : int = '''lower newer''' return input_text, output_text def __magic_name__( self :str ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Tuple = '''lower newer''' __SCREAMING_SNAKE_CASE : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] __SCREAMING_SNAKE_CASE : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : List[str] = tokenizer('''Hello''' , '''World''' ) __SCREAMING_SNAKE_CASE : Dict = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , lowerCAmelCase__ ) @slow def __magic_name__( self :Optional[int] ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = 
tokenizer.encode( '''sequence builders''' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def __magic_name__( self :Any ) -> Dict: __SCREAMING_SNAKE_CASE : Tuple = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __SCREAMING_SNAKE_CASE : Dict = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = [tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) for seq in encoding['''input_ids''']] # fmt: off __SCREAMING_SNAKE_CASE : List[Any] = { '''input_ids''': [ [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __SCREAMING_SNAKE_CASE : List[str] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , lowerCAmelCase__ ) for expected, decoded in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
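# A short usage sketch of the tokenizer class under test, assuming network
# access to the `microsoft/deberta-base` checkpoint referenced in the slow
# tests above.
from transformers import DebertaTokenizer

tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
enc = tok("Hello", "World")
print(enc["input_ids"])       # ids for [CLS] Hello [SEP] World [SEP]
print(enc["token_type_ids"])  # segment ids separating the two input texts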
9
from __future__ import annotations


def _UpperCamelCase ( lowercase__ ):
    __SCREAMING_SNAKE_CASE : Dict = 0.00
    __SCREAMING_SNAKE_CASE : List[str] = 0
    for resistor in resistors:
        if resistor <= 0:
            __SCREAMING_SNAKE_CASE : Any = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(lowercase__ )
        first_sum += 1 / float(lowercase__ )
        index += 1
    return 1 / first_sum


def _UpperCamelCase ( lowercase__ ):
    __SCREAMING_SNAKE_CASE : Tuple = 0.00
    __SCREAMING_SNAKE_CASE : int = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            __SCREAMING_SNAKE_CASE : Tuple = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(lowercase__ )
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
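# A quick worked check of the two formulas above, independent of the helpers'
# machine-mangled names: 2 ohm and 4 ohm combined in series and in parallel.
series = 2.0 + 4.0                  # sum of resistances
parallel = 1 / (1 / 2.0 + 1 / 4.0)  # reciprocal of the summed reciprocals
print(series, parallel)             # 6.0 1.3333333333333333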
9
1
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase :List[str] = logging.get_logger(__name__) lowerCAmelCase :str = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } lowerCAmelCase :Tuple = { '''b0''': { '''hidden_dim''': 1_2_8_0, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 2_2_4, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1_2_8_0, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 2_4_0, '''dropout_rate''': 0.2, '''dw_padding''': [1_6], }, '''b2''': { '''hidden_dim''': 1_4_0_8, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 2_6_0, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 1_6], }, '''b3''': { '''hidden_dim''': 1_5_3_6, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 3_0_0, '''dropout_rate''': 0.3, '''dw_padding''': [5, 1_8], }, '''b4''': { '''hidden_dim''': 1_7_9_2, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 3_8_0, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2_0_4_8, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 4_5_6, '''dropout_rate''': 0.4, '''dw_padding''': [1_3, 2_7], }, '''b6''': { '''hidden_dim''': 2_3_0_4, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 5_2_8, '''dropout_rate''': 0.5, '''dw_padding''': [3_1], }, '''b7''': { '''hidden_dim''': 2_5_6_0, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 6_0_0, '''dropout_rate''': 0.5, '''dw_padding''': [1_8], }, } def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" __magic_name__ : List[str] = EfficientNetConfig() __magic_name__ : int = CONFIG_MAP[model_name]['hidden_dim'] __magic_name__ : Dict = CONFIG_MAP[model_name]['width_coef'] __magic_name__ : str = CONFIG_MAP[model_name]['depth_coef'] __magic_name__ : Union[str, Any] = CONFIG_MAP[model_name]['image_size'] __magic_name__ : str = CONFIG_MAP[model_name]['dropout_rate'] __magic_name__ : List[str] = CONFIG_MAP[model_name]['dw_padding'] __magic_name__ : str = 'huggingface/label-files' __magic_name__ : Dict = 'imagenet-1k-id2label.json' __magic_name__ : List[str] = 1000 __magic_name__ : Any = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) ) __magic_name__ : Any = {int(lowerCAmelCase ): v for k, v in idalabel.items()} __magic_name__ : int = idalabel __magic_name__ : Optional[int] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ): """simple docstring""" __magic_name__ : str = 'http://images.cocodataset.org/val2017/000000039769.jpg' __magic_name__ : List[str] = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ) return im def lowerCamelCase ( lowerCAmelCase : Optional[int] ): """simple docstring""" __magic_name__ : int = CONFIG_MAP[model_name]['image_size'] 
__magic_name__ : List[str] = EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=lowerCAmelCase , ) return preprocessor def lowerCamelCase ( lowerCAmelCase : int ): """simple docstring""" __magic_name__ : Union[str, Any] = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )] __magic_name__ : Optional[Any] = sorted(set(lowerCAmelCase ) ) __magic_name__ : Optional[Any] = len(lowerCAmelCase ) __magic_name__ : Tuple = {b: str(lowerCAmelCase ) for b, i in zip(lowerCAmelCase , range(lowerCAmelCase ) )} __magic_name__ : Optional[int] = [] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') ) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') ) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') ) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') ) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') ) for b in block_names: __magic_name__ : Union[str, Any] = block_name_mapping[b] rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') ) rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') ) rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') ) rename_keys.append( (f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') ) rename_keys.append( (f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') ) rename_keys.append( (f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') ) rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') ) rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') ) rename_keys.append( (f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') ) rename_keys.append( (f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') ) rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') ) rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') ) rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') ) rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') ) rename_keys.append( (f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') ) rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') ) rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') ) rename_keys.append( (f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') ) rename_keys.append( (f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') ) rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') ) rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') ) rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') ) 
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') ) rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') ) __magic_name__ : Optional[Any] = {} for item in rename_keys: if item[0] in original_param_names: __magic_name__ : Dict = 'efficientnet.' + item[1] __magic_name__ : int = 'classifier.weight' __magic_name__ : List[Any] = 'classifier.bias' return key_mapping def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue __magic_name__ : Union[str, Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __magic_name__ : Dict = torch.from_numpy(lowerCAmelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __magic_name__ : List[Any] = torch.from_numpy(lowerCAmelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __magic_name__ : Dict = torch.from_numpy(np.transpose(lowerCAmelCase ) ) else: __magic_name__ : Tuple = torch.from_numpy(lowerCAmelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowerCAmelCase ) @torch.no_grad() def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" __magic_name__ : Dict = model_classes[model_name]( include_top=lowerCAmelCase , weights='imagenet' , input_tensor=lowerCAmelCase , input_shape=lowerCAmelCase , pooling=lowerCAmelCase , classes=1000 , classifier_activation='softmax' , ) __magic_name__ : List[Any] = original_model.trainable_variables __magic_name__ : Any = original_model.non_trainable_variables __magic_name__ : Any = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __magic_name__ : Tuple = param.numpy() __magic_name__ : Optional[int] = list(tf_params.keys() ) # Load HuggingFace model __magic_name__ : int = get_efficientnet_config(lowerCAmelCase ) __magic_name__ : List[Any] = EfficientNetForImageClassification(lowerCAmelCase ).eval() __magic_name__ : List[str] = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('Converting parameters...' ) __magic_name__ : Any = rename_keys(lowerCAmelCase ) replace_params(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Initialize preprocessor and preprocess input image __magic_name__ : Tuple = convert_image_processor(lowerCAmelCase ) __magic_name__ : Dict = preprocessor(images=prepare_img() , return_tensors='pt' ) # HF model inference hf_model.eval() with torch.no_grad(): __magic_name__ : Optional[int] = hf_model(**lowerCAmelCase ) __magic_name__ : Any = outputs.logits.detach().numpy() # Original model inference __magic_name__ : Any = False __magic_name__ : int = CONFIG_MAP[model_name]['image_size'] __magic_name__ : Optional[int] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __magic_name__ : Dict = image.img_to_array(lowerCAmelCase ) __magic_name__ : str = np.expand_dims(lowerCAmelCase , axis=0 ) __magic_name__ : Any = original_model.predict(lowerCAmelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ), "The predicted logits are not the same." print('Model outputs match!' 
) if save_model: # Create folder to save model if not os.path.isdir(lowerCAmelCase ): os.mkdir(lowerCAmelCase ) # Save converted model and image processor hf_model.save_pretrained(lowerCAmelCase ) preprocessor.save_pretrained(lowerCAmelCase ) if push_to_hub: # Push model and image processor to hub print(f'Pushing converted {model_name} to the hub...' ) __magic_name__ : Any = f'efficientnet-{model_name}' preprocessor.push_to_hub(lowerCAmelCase ) hf_model.push_to_hub(lowerCAmelCase ) if __name__ == "__main__": lowerCAmelCase :int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') lowerCAmelCase :Union[str, Any] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
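# A minimal sketch of the TF-to-PyTorch kernel permutation used in
# `replace_params` above: Keras stores Conv2D weights as (H, W, in, out)
# while PyTorch expects (out, in, H, W), hence `permute(3, 2, 0, 1)`. The
# shapes below are illustrative.
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)  # (H, W, C_in, C_out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
print(tuple(pt_kernel.shape))  # (32, 16, 3, 3)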
368
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase :Optional[int] = logging.get_logger(__name__) set_seed(7_7_0) lowerCAmelCase :str = { '''c_attn''': '''att_proj''', '''c_proj''': '''out_proj''', '''c_fc''': '''in_proj''', '''transformer.''': '''''', '''h.''': '''layers.''', '''ln_1''': '''layernorm_1''', '''ln_2''': '''layernorm_2''', '''ln_f''': '''layernorm_final''', '''wpe''': '''position_embeds_layer''', '''wte''': '''input_embeds_layer''', } lowerCAmelCase :Any = { '''text_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text.pt''', }, '''coarse_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse.pt''', }, '''fine_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine.pt''', }, '''text''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text_2.pt''', }, '''coarse''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse_2.pt''', }, '''fine''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine_2.pt''', }, } lowerCAmelCase :List[Any] = os.path.dirname(os.path.abspath(__file__)) lowerCAmelCase :List[Any] = os.path.join(os.path.expanduser('''~'''), '''.cache''') lowerCAmelCase :List[str] = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=False ): """simple docstring""" __magic_name__ : str = model_type if use_small: key += "_small" return os.path.join(lowerCAmelCase , REMOTE_MODEL_PATHS[key]['file_name'] ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ): """simple docstring""" os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) hf_hub_download(repo_id=lowerCAmelCase , filename=lowerCAmelCase , local_dir=lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : str="text" ): """simple docstring""" if model_type == "text": __magic_name__ : Tuple = BarkSemanticModel __magic_name__ : Optional[int] = BarkSemanticConfig __magic_name__ : List[Any] = BarkSemanticGenerationConfig elif model_type == "coarse": __magic_name__ : List[str] = BarkCoarseModel __magic_name__ : Dict = BarkCoarseConfig __magic_name__ : Tuple = BarkCoarseGenerationConfig elif model_type == "fine": __magic_name__ : Optional[Any] = BarkFineModel __magic_name__ : Dict = BarkFineConfig __magic_name__ : Tuple = BarkFineGenerationConfig else: raise NotImplementedError() __magic_name__ : int = f'{model_type}_small' if use_small else model_type __magic_name__ : List[str] = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(lowerCAmelCase ): logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' 
) _download(model_info['repo_id'] , model_info['file_name'] ) __magic_name__ : Optional[Any] = torch.load(lowerCAmelCase , map_location=lowerCAmelCase ) # this is a hack __magic_name__ : Optional[Any] = checkpoint['model_args'] if "input_vocab_size" not in model_args: __magic_name__ : Dict = model_args['vocab_size'] __magic_name__ : Optional[int] = model_args['vocab_size'] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments __magic_name__ : Optional[Any] = model_args.pop('n_head' ) __magic_name__ : List[str] = model_args.pop('n_embd' ) __magic_name__ : List[Any] = model_args.pop('n_layer' ) __magic_name__ : Optional[Any] = ConfigClass(**checkpoint['model_args'] ) __magic_name__ : Any = ModelClass(config=lowerCAmelCase ) __magic_name__ : List[str] = GenerationConfigClass() __magic_name__ : List[Any] = model_generation_config __magic_name__ : str = checkpoint['model'] # fixup checkpoint __magic_name__ : str = '_orig_mod.' for k, v in list(state_dict.items() ): if k.startswith(lowerCAmelCase ): # replace part of the key with corresponding layer name in HF implementation __magic_name__ : Tuple = k[len(lowerCAmelCase ) :] for old_layer_name in new_layer_name_dict: __magic_name__ : int = new_k.replace(lowerCAmelCase , new_layer_name_dict[old_layer_name] ) __magic_name__ : Union[str, Any] = state_dict.pop(lowerCAmelCase ) __magic_name__ : Optional[Any] = set(state_dict.keys() ) - set(model.state_dict().keys() ) __magic_name__ : Any = {k for k in extra_keys if not k.endswith('.attn.bias' )} __magic_name__ : Any = set(model.state_dict().keys() ) - set(state_dict.keys() ) __magic_name__ : Dict = {k for k in missing_keys if not k.endswith('.attn.bias' )} if len(lowerCAmelCase ) != 0: raise ValueError(f'extra keys found: {extra_keys}' ) if len(lowerCAmelCase ) != 0: raise ValueError(f'missing keys: {missing_keys}' ) model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase ) __magic_name__ : Union[str, Any] = model.num_parameters(exclude_embeddings=lowerCAmelCase ) __magic_name__ : Optional[Any] = checkpoint['best_val_loss'].item() logger.info(f'model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowerCAmelCase , 3 )} loss' ) model.eval() model.to(lowerCAmelCase ) del checkpoint, state_dict return model def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Tuple="text" ): """simple docstring""" if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() __magic_name__ : List[str] = 'cpu' # do conversion on cpu __magic_name__ : int = _get_ckpt_path(lowerCAmelCase , use_small=lowerCAmelCase ) __magic_name__ : Any = _load_model(lowerCAmelCase , lowerCAmelCase , model_type=lowerCAmelCase , use_small=lowerCAmelCase ) # load bark initial model __magic_name__ : List[str] = _bark_load_model(lowerCAmelCase , 'cpu' , model_type=lowerCAmelCase , use_small=lowerCAmelCase ) if model_type == "text": __magic_name__ : int = bark_model['model'] if model.num_parameters(exclude_embeddings=lowerCAmelCase ) != bark_model.get_num_params(): raise ValueError('initial and new models don\'t have the same number of parameters' ) # check if same output as the bark model __magic_name__ : Union[str, Any] = 5 __magic_name__ : Optional[int] = 10 if model_type in ["text", "coarse"]: __magic_name__ : Optional[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) __magic_name__ : List[str] = bark_model(lowerCAmelCase )[0] __magic_name__ : Optional[int] = model(lowerCAmelCase ) # take last logits 
__magic_name__ : int = output_new_model_total.logits[:, [-1], :] else: __magic_name__ : Tuple = 3 __magic_name__ : List[str] = 8 __magic_name__ : List[str] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) __magic_name__ : str = model(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : Tuple = bark_model(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : Tuple = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('initial and new outputs don\'t have the same shape' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('initial and new outputs are not equal' ) Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) model.save_pretrained(lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : str , ): """simple docstring""" __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : Dict = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) ) __magic_name__ : str = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) ) __magic_name__ : int = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) ) __magic_name__ : List[Any] = EncodecConfig.from_pretrained('facebook/encodec_24khz' ) __magic_name__ : Optional[int] = BarkSemanticModel.from_pretrained(lowerCAmelCase ) __magic_name__ : Dict = BarkCoarseModel.from_pretrained(lowerCAmelCase ) __magic_name__ : List[str] = BarkFineModel.from_pretrained(lowerCAmelCase ) __magic_name__ : Optional[Any] = EncodecModel.from_pretrained('facebook/encodec_24khz' ) __magic_name__ : Dict = BarkConfig.from_sub_model_configs( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __magic_name__ : List[Any] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) __magic_name__ : int = BarkModel(lowerCAmelCase ) __magic_name__ : List[str] = semantic __magic_name__ : Optional[int] = coarseAcoustic __magic_name__ : List[str] = fineAcoustic __magic_name__ : int = codec __magic_name__ : Union[str, Any] = bark_generation_config Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) bark.save_pretrained(lowerCAmelCase , repo_id=lowerCAmelCase , push_to_hub=lowerCAmelCase ) if __name__ == "__main__": lowerCAmelCase :Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') lowerCAmelCase :Union[str, Any] = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
275
0
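A minimal usage sketch for the EfficientNet converter in the record above. It drives the same function the script's own __main__ block calls, with the argparse defaults shown there ("b0", "hf_model"); the import path is hypothetical, since the record never names its module.

# Hypothetical module name -- the record does not say where the script lives.
from convert_efficientnet_to_pytorch import convert_efficientnet_checkpoint

# Positional arguments mirror the script's own __main__ call:
# (model_name, pytorch_dump_folder_path, save_model, push_to_hub)
convert_efficientnet_checkpoint("b0", "hf_model", True, False)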
'''simple docstring''' lowerCamelCase = """0.18.2""" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, 
KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, 
FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
166
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
299
0
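A quick sanity check of the perfect-partition test in the Project Euler record above: the integers it accepts are exactly n = 2**e * (2**e - 1), so listing the small hits makes the floating-point check easy to trust.

import math

def check_partition_perfect(positive_integer: int) -> bool:
    # Same test as in the record: the exponent must come out integral.
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)

# Each accepted value equals 2**e * (2**e - 1):
print([n for n in range(1, 300) if check_partition_perfect(n)])  # [2, 12, 56, 240]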
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
368
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: Any , _lowerCAmelCase: int , _lowerCAmelCase: str , _lowerCAmelCase: Union[str, Any] ): lowercase :List[str] = dataset lowercase :Optional[int] = process lowercase :Union[str, Any] = params def __len__( self: str ): return len(self.dataset ) def __getitem__( self: int , _lowerCAmelCase: Dict ): lowercase :Union[str, Any] = self.dataset[i] lowercase :Optional[int] = self.process(_lowerCAmelCase , **self.params ) return processed class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: int , _lowerCAmelCase: Tuple , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: int , _lowerCAmelCase: Optional[int]=None ): lowercase :Optional[Any] = loader lowercase :int = infer lowercase :Dict = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowercase :Union[str, Any] = None lowercase :Any = loader_batch_size # Internal bookkeeping lowercase :Optional[Any] = None lowercase :Dict = None def __len__( self: Tuple ): return len(self.loader ) def __iter__( self: List[str] ): lowercase :Dict = iter(self.loader ) return self def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowercase :Optional[int] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowercase :str = {} for k, element in self._loader_batch_data.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Convert ModelOutput to tuple first lowercase :Dict = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowercase :int = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowercase :List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowercase :Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowercase :List[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowercase :Optional[int] = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowercase :Optional[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowercase :Any = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
lowercase :List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowercase :List[Any] = self._loader_batch_data.__class__(_lowerCAmelCase ) self._loader_batch_index += 1 return result def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowercase :Tuple = next(self.iterator ) lowercase :Dict = self.infer(_lowerCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_lowerCAmelCase , torch.Tensor ): lowercase :List[str] = processed else: lowercase :Tuple = list(processed.keys() )[0] lowercase :Optional[Any] = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): lowercase :Optional[int] = len(_lowerCAmelCase ) else: lowercase :Dict = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowercase :Tuple = observed_batch_size # Setting internal index to unwrap the batch lowercase :int = processed lowercase :Optional[Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: Union[str, Any] , _lowerCAmelCase: Tuple , _lowerCAmelCase: str , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __iter__( self: Tuple ): lowercase :List[str] = iter(self.loader ) lowercase :str = None return self def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): if self.subiterator is None: lowercase :List[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowercase :str = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowercase :Tuple = self.infer(next(self.iterator ) , **self.params ) lowercase :Dict = next(self.subiterator ) return processed class __lowerCAmelCase ( lowerCAmelCase): def __iter__( self: str ): lowercase :List[Any] = iter(self.loader ) return self def SCREAMING_SNAKE_CASE ( self: str ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
lowercase :str = False lowercase :int = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowercase :str = self.loader_batch_item() lowercase :int = item.pop("is_last" ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator while not is_last: lowercase :str = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_lowerCAmelCase , torch.Tensor ): lowercase :Tuple = processed else: lowercase :Union[str, Any] = list(processed.keys() )[0] lowercase :Any = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): lowercase :Dict = len(_lowerCAmelCase ) else: lowercase :List[str] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowercase :Union[str, Any] = observed_batch_size lowercase :str = processed lowercase :Optional[int] = 0 while self._loader_batch_index < self.loader_batch_size: lowercase :Any = self.loader_batch_item() lowercase :int = item.pop("is_last" ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator else: lowercase :Optional[Any] = processed lowercase :str = item.pop("is_last" ) accumulator.append(_lowerCAmelCase ) return accumulator class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: Union[str, Any] , _lowerCAmelCase: Dataset , _lowerCAmelCase: str ): lowercase :Tuple = dataset lowercase :Dict = key def __len__( self: Any ): return len(self.dataset ) def __getitem__( self: int , _lowerCAmelCase: int ): return self.dataset[i][self.key] class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: List[Any] , _lowerCAmelCase: Dataset , _lowerCAmelCase: str , _lowerCAmelCase: str ): lowercase :Union[str, Any] = dataset lowercase :Optional[int] = keya lowercase :str = keya def __len__( self: Optional[Any] ): return len(self.dataset ) def __getitem__( self: Optional[Any] , _lowerCAmelCase: int ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
158
0
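A minimal check of the kinetic-energy helper in the record above (names follow the cleaned-up version): the velocity enters squared, so its sign cannot change the result.

def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)

print(kinetic_energy(10, 10))   # 500.0
print(kinetic_energy(2, -3.5))  # 12.25 -- same energy as velocity +3.5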
'''simple docstring''' import os import string import sys __A = 1 << 8 __A = { "tab": ord("\t"), "newline": ord("\r"), "esc": 27, "up": 65 + ARROW_KEY_FLAG, "down": 66 + ARROW_KEY_FLAG, "right": 67 + ARROW_KEY_FLAG, "left": 68 + ARROW_KEY_FLAG, "mod_int": 91, "undefined": sys.maxsize, "interrupt": 3, "insert": 50, "delete": 51, "pg_up": 53, "pg_down": 54, } __A = KEYMAP["up"] __A = KEYMAP["left"] if sys.platform == "win32": __A = [] __A = { B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG, B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG, B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG, B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG, B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG, B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG, B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG, B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG, } for i in range(10): __A = ord(str(i)) def _A ( ): if os.name == "nt": import msvcrt lowercase__ = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(lowercase__ ) == 0: # Read the keystroke lowercase__ = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): lowercase__ = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: lowercase__ = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(lowercase__ ) if ord(lowercase__ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) lowercase__ = chr(KEYMAP["""esc"""] ) except KeyError: lowercase__ = cha[1] else: lowercase__ = ch.decode(lowercase__ ) else: lowercase__ = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty lowercase__ = sys.stdin.fileno() lowercase__ = termios.tcgetattr(lowercase__ ) try: tty.setraw(lowercase__ ) lowercase__ = sys.stdin.read(1 ) finally: termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ ) return ch def _A ( ): lowercase__ = get_raw_chars() if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(lowercase__ ) == KEYMAP["esc"]: lowercase__ = get_raw_chars() if ord(lowercase__ ) == KEYMAP["mod_int"]: lowercase__ = get_raw_chars() if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(lowercase__ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
164
'''simple docstring'''


def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1

    return sundays


if __name__ == "__main__":
    print(solution())
164
1
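An independent cross-check of the Sunday-counting record above, using only the standard library; datetime.date.weekday() returns 6 for Sunday, and the hand-rolled calendar walk should reproduce the same count.

import datetime

sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if datetime.date(year, month, 1).weekday() == 6
)
print(sundays)  # 171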
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __SCREAMING_SNAKE_CASE ( __lowerCAmelCase ): A : List[str] = ['''image_processor''', '''tokenizer'''] A : int = '''ViTImageProcessor''' A : Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowerCAmelCase_ , ) lowercase : Optional[Any] = kwargs.pop('''feature_extractor''' ) lowercase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) def __call__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ): if text is None and visual_prompt is None and images is None: raise ValueError('''You have to specify either text, visual prompt or images.''' ) if text is not None and visual_prompt is not None: raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' ) if text is not None: lowercase : Dict = self.tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if visual_prompt is not None: lowercase : List[str] = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if images is not None: lowercase : Tuple = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if visual_prompt is not None and images is not None: lowercase : List[str] = { '''pixel_values''': image_features.pixel_values, '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowercase : str = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowercase : Tuple = { '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @property def __lowerCamelCase ( self ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase_ , ) return self.image_processor_class @property def __lowerCamelCase ( self ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase_ , ) return self.image_processor
365
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : Any = os.path.abspath(_UpperCamelCase ) logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model lowercase : Optional[int] = tf.train.list_variables(_UpperCamelCase ) lowercase : Optional[int] = [] lowercase : Optional[int] = [] lowercase : Optional[Any] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") lowercase : int = full_name.split('''/''' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(f"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' lowercase : List[Any] = name[1:] # figure out how many levels deep the name is lowercase : Optional[int] = 0 for _name in name: if _name.startswith('''layer_with_weights''' ): depth += 1 else: break layer_depth.append(_UpperCamelCase ) # read data lowercase : Any = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase ) names.append('''/'''.join(_UpperCamelCase ) ) arrays.append(_UpperCamelCase ) logger.info(f"""Read a total of {len(_UpperCamelCase ):,} layers""" ) # Sanity check if len(set(_UpperCamelCase ) ) != 1: raise ValueError(f"""Found layer names with different depths (layer depth {list(set(_UpperCamelCase ) )})""" ) lowercase : List[str] = list(set(_UpperCamelCase ) )[0] if layer_depth != 1: raise ValueError( '''The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP''' ''' heads.''' ) # convert layers logger.info('''Converting weights...''' ) for full_name, array in zip(_UpperCamelCase, _UpperCamelCase ): lowercase : Optional[int] = full_name.split('''/''' ) lowercase : Tuple = model lowercase : Any = [] for i, m_name in enumerate(_UpperCamelCase ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('''layer_with_weights''' ): lowercase : Tuple = int(m_name.split('''-''' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['''embeddings''', '''LayerNorm'''] ) lowercase : int = getattr(_UpperCamelCase, '''embeddings''' ) lowercase : Optional[Any] = getattr(_UpperCamelCase, '''LayerNorm''' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] ) lowercase : int = getattr(_UpperCamelCase, '''encoder''' ) lowercase : Tuple = getattr(_UpperCamelCase, '''layer''' ) lowercase : List[Any] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['''pooler''', '''dense'''] ) lowercase : str = getattr(_UpperCamelCase, '''pooler''' ) lowercase : str = getattr(_UpperCamelCase, '''dense''' ) elif m_name == "embeddings": trace.append('''embeddings''' ) lowercase : Optional[int] = getattr(_UpperCamelCase, '''embeddings''' ) if layer_num == 0: trace.append('''word_embeddings''' ) lowercase : str = getattr(_UpperCamelCase, '''word_embeddings''' ) elif layer_num == 1: trace.append('''position_embeddings''' ) lowercase : List[Any] = getattr(_UpperCamelCase, '''position_embeddings''' ) elif layer_num == 2: trace.append('''token_type_embeddings''' ) lowercase : int = getattr(_UpperCamelCase, '''token_type_embeddings''' ) else: raise ValueError(f"""Unknown embedding layer with name {full_name}""" ) trace.append('''weight''' ) lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''weight''' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['''attention''', '''self'''] ) lowercase : Tuple = getattr(_UpperCamelCase, '''attention''' ) lowercase : str = getattr(_UpperCamelCase, '''self''' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['''attention''', '''output''', '''LayerNorm'''] ) lowercase : Dict = getattr(_UpperCamelCase, '''attention''' ) lowercase : Any = getattr(_UpperCamelCase, '''output''' ) lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''LayerNorm''' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['''attention''', '''output''', '''dense'''] ) lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''attention''' ) lowercase : str = getattr(_UpperCamelCase, '''output''' ) lowercase : Optional[int] = getattr(_UpperCamelCase, '''dense''' ) elif m_name == "_output_dense": # output dense trace.extend(['''output''', '''dense'''] ) lowercase : List[str] = getattr(_UpperCamelCase, '''output''' ) lowercase : str = getattr(_UpperCamelCase, '''dense''' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['''output''', '''LayerNorm'''] ) lowercase : Dict = getattr(_UpperCamelCase, '''output''' ) lowercase : int = getattr(_UpperCamelCase, '''LayerNorm''' ) elif m_name == "_key_dense": # attention key trace.append('''key''' ) lowercase : Optional[Any] = getattr(_UpperCamelCase, 
'''key''' ) elif m_name == "_query_dense": # attention query trace.append('''query''' ) lowercase : Dict = getattr(_UpperCamelCase, '''query''' ) elif m_name == "_value_dense": # attention value trace.append('''value''' ) lowercase : Optional[Any] = getattr(_UpperCamelCase, '''value''' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['''intermediate''', '''dense'''] ) lowercase : List[str] = getattr(_UpperCamelCase, '''intermediate''' ) lowercase : Optional[int] = getattr(_UpperCamelCase, '''dense''' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('''output''' ) lowercase : Tuple = getattr(_UpperCamelCase, '''output''' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('''bias''' ) lowercase : str = getattr(_UpperCamelCase, '''bias''' ) elif m_name in ["kernel", "gamma"]: trace.append('''weight''' ) lowercase : Dict = getattr(_UpperCamelCase, '''weight''' ) else: logger.warning(f"""Ignored {m_name}""" ) # for certain layers reshape is necessary lowercase : Any = '''.'''.join(_UpperCamelCase ) if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''', _UpperCamelCase ) or re.match( R'''(\S+)\.attention\.output\.dense\.weight''', _UpperCamelCase ): lowercase : Any = array.reshape(pointer.data.shape ) if "kernel" in full_name: lowercase : List[str] = array.transpose() if pointer.shape == array.shape: lowercase : Optional[Any] = torch.from_numpy(_UpperCamelCase ) else: raise ValueError( f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" f""" {array.shape}""" ) logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Tuple: """simple docstring""" logger.info(f"""Loading model based on config from {config_path}...""" ) lowercase : List[Any] = BertConfig.from_json_file(_UpperCamelCase ) lowercase : Dict = BertModel(_UpperCamelCase ) # Load weights from checkpoint logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) # Save pytorch-model logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict(), _UpperCamelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model (must include filename).''', ) __a = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
173
0
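A hedged usage sketch for the CLIPSeg-style processor in the record above, written against the transformers API it mirrors; the hub checkpoint name is an assumption, since the record never fixes one.

# Assumes transformers and Pillow are installed; the checkpoint name is an assumption.
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (224, 224))  # dummy image; any PIL image works

# text + images populates pixel_values; a visual prompt would instead
# populate conditional_pixel_values, per the record's __call__ branches.
inputs = processor(text=["a cat"], images=image, return_tensors="pt")
print(inputs.keys())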
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def __UpperCamelCase ( _A , _A , _A=0 ): # Format the message. if name is None: lowerCAmelCase_ = None else: lowerCAmelCase_ = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}''' lowerCAmelCase_ = fmt.format(_A ) # Print and recurse (if needed). if isinstance(_A , _A ): if msg is not None: print(_A ) for k in val.keys(): recursive_print(_A , val[k] , spaces + 2 ) elif isinstance(_A , torch.Tensor ): print(_A , ''':''' , val.size() ) else: print(_A , ''':''' , _A ) def __UpperCamelCase ( _A , _A , _A , _A , _A ): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. lowerCAmelCase_ = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] lowerCAmelCase_ = (num_heads, hidden_size, num_splits) + input_shape[1:] lowerCAmelCase_ = param.view(*_A ) lowerCAmelCase_ = param.transpose(0 , 2 ) lowerCAmelCase_ = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] lowerCAmelCase_ = (num_heads, num_splits, hidden_size) + input_shape[1:] lowerCAmelCase_ = param.view(*_A ) lowerCAmelCase_ = param.transpose(0 , 1 ).contiguous() lowerCAmelCase_ = param.view(*_A ) return param def __UpperCamelCase ( _A , _A , _A ): # The converted output model. 
lowerCAmelCase_ = {} # old versions did not store training args lowerCAmelCase_ = input_state_dict.get('''args''' , _A ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) lowerCAmelCase_ = ds_args.padded_vocab_size lowerCAmelCase_ = ds_args.max_position_embeddings lowerCAmelCase_ = ds_args.hidden_size lowerCAmelCase_ = ds_args.num_layers lowerCAmelCase_ = ds_args.num_attention_heads lowerCAmelCase_ = ds_args.ffn_hidden_size # pprint(config) # The number of heads. lowerCAmelCase_ = config.n_head # The hidden_size per head. lowerCAmelCase_ = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): lowerCAmelCase_ = input_state_dict['''checkpoint_version'''] else: lowerCAmelCase_ = 0.0 # The model. lowerCAmelCase_ = input_state_dict['''model'''] # The language model. lowerCAmelCase_ = model['''language_model'''] # The embeddings. lowerCAmelCase_ = lm['''embedding'''] # The word embeddings. lowerCAmelCase_ = embeddings['''word_embeddings''']['''weight'''] # Truncate the embedding table to vocab_size rows. lowerCAmelCase_ = word_embeddings[: config.vocab_size, :] lowerCAmelCase_ = word_embeddings # The position embeddings. lowerCAmelCase_ = embeddings['''position_embeddings''']['''weight'''] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] lowerCAmelCase_ = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" ) # Store the position embeddings. lowerCAmelCase_ = pos_embeddings # The transformer. lowerCAmelCase_ = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder'''] # The regex to extract layer names. lowerCAmelCase_ = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. lowerCAmelCase_ = { '''attention.dense''': '''.attn.c_proj.''', '''self_attention.dense''': '''.attn.c_proj.''', '''mlp.dense_h_to_4h''': '''.mlp.c_fc.''', '''mlp.dense_4h_to_h''': '''.mlp.c_proj.''', } # Extract the layers. for key, val in transformer.items(): # Match the name. lowerCAmelCase_ = layer_re.match(_A ) # Stop if that's not a layer if m is None: break # The index of the layer. lowerCAmelCase_ = int(m.group(1 ) ) # The name of the operation. lowerCAmelCase_ = m.group(2 ) # Is it a weight or a bias? lowerCAmelCase_ = m.group(3 ) # The name of the layer. lowerCAmelCase_ = f"transformer.h.{layer_idx}" # For layernorm(s), simply store the layer norm. if op_name.endswith('''layernorm''' ): lowerCAmelCase_ = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2''' lowerCAmelCase_ = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. lowerCAmelCase_ = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , _A , _A ) lowerCAmelCase_ = causal_mask # Insert a "dummy" tensor for masked_bias. lowerCAmelCase_ = torch.tensor(-1E4 , dtype=torch.floataa ) lowerCAmelCase_ = masked_bias lowerCAmelCase_ = fix_query_key_value_ordering(_A , _A , 3 , _A , _A ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. lowerCAmelCase_ = out_val.transpose(0 , 1 ).contiguous() # Store. 
lowerCAmelCase_ = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": lowerCAmelCase_ = fix_query_key_value_ordering(_A , _A , 3 , _A , _A ) # Store. No change of shape. lowerCAmelCase_ = out_val # Transpose the weights. elif weight_or_bias == "weight": lowerCAmelCase_ = megatron_to_transformers[op_name] lowerCAmelCase_ = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": lowerCAmelCase_ = megatron_to_transformers[op_name] lowerCAmelCase_ = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. lowerCAmelCase_ = transformer['''final_layernorm.weight'''] lowerCAmelCase_ = transformer['''final_layernorm.bias'''] # For LM head, transformers' wants the matrix to weight embeddings. lowerCAmelCase_ = word_embeddings # It should be done! return output_state_dict def __UpperCamelCase ( ): # Create the argument parser. lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=_A , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=_A , help='''An optional config json file describing the pre-trained model.''' , ) lowerCAmelCase_ = parser.parse_args() # Extract the basename. lowerCAmelCase_ = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) else: lowerCAmelCase_ = torch.load(args.path_to_checkpoint , map_location='''cpu''' ) lowerCAmelCase_ = input_state_dict.get('''args''' , _A ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: lowerCAmelCase_ = '''gelu_fast''' elif ds_args.openai_gelu: lowerCAmelCase_ = '''gelu_new''' else: lowerCAmelCase_ = '''gelu''' else: # in the very early days this used to be "gelu_new" lowerCAmelCase_ = '''gelu_new''' # Spell out all parameters in case the defaults change. lowerCAmelCase_ = GPTaConfig( vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type='''cls_index''' , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=50256 , eos_token_id=50256 , ) else: lowerCAmelCase_ = GPTaConfig.from_json_file(args.config_file ) lowerCAmelCase_ = ['''GPT2LMHeadModel'''] # Convert. print('''Converting''' ) lowerCAmelCase_ = convert_megatron_checkpoint(_A , _A , _A ) # Print the structure of converted state dict. 
if args.print_checkpoint_structure: recursive_print(_A , _A ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: lowerCAmelCase_ = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": lowerCAmelCase_ = '''gpt2''' elif tokenizer_type == "PretrainedFromHF": lowerCAmelCase_ = ds_args.tokenizer_name_or_path else: raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" ) else: lowerCAmelCase_ = '''gpt2''' lowerCAmelCase_ = AutoTokenizer.from_pretrained(_A ) lowerCAmelCase_ = type(_A ).__name__ lowerCAmelCase_ = tokenizer_class # Store the config to file. print('''Saving config''' ) config.save_pretrained(_A ) # Save tokenizer based on args print(f"Adding {tokenizer_class} tokenizer files" ) tokenizer.save_pretrained(_A ) # Store the state_dict to file. lowerCAmelCase_ = os.path.join(_A , '''pytorch_model.bin''' ) print(f"Saving checkpoint to \"{output_checkpoint_file}\"" ) torch.save(_A , _A ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
278
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def __UpperCamelCase ( _A ): lowerCAmelCase_ = [ '''decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_A , _A ) def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(_A , _A , bias=_A ) lowerCAmelCase_ = emb.weight.data return lin_layer def __UpperCamelCase ( _A ): lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) lowerCAmelCase_ = Namespace(**checkpoint['''cfg''']['''model'''] ) lowerCAmelCase_ = checkpoint['''model'''] remove_ignore_keys_(_A ) lowerCAmelCase_ = state_dict['''decoder.embed_tokens.weight'''].shape[0] lowerCAmelCase_ = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()} lowerCAmelCase_ = XGLMConfig( vocab_size=_A , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) lowerCAmelCase_ = XGLMForCausalLM(_A ) lowerCAmelCase_ = model.load_state_dict(_A , strict=_A ) print(_A ) lowerCAmelCase_ = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') _A = parser.parse_args() _A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
278
1
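A sketch of driving the XGLM converter in the record above without argparse; it mirrors the record's own __main__ (convert from disk, then save_pretrained). Both paths are placeholders.

# Paths are placeholders; the function name comes from the record itself.
model = convert_fairseq_xglm_checkpoint_from_disk("checkpoints/xglm/model.pt")
model.save_pretrained("xglm-hf")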
"""simple docstring""" import string def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase_ = '''''' for i in sequence: lowercase_ = ord(UpperCamelCase__ ) if 65 <= extract <= 90: output += chr(1_55 - extract ) elif 97 <= extract <= 1_22: output += chr(2_19 - extract ) else: output += i return output def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase_ = string.ascii_letters lowercase_ = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(UpperCamelCase__ )] if c in letters else c for c in sequence ) def _SCREAMING_SNAKE_CASE () -> Optional[Any]: '''simple docstring''' from timeit import timeit print("""Running performance benchmarks...""" ) lowercase_ = '''from string import printable ; from __main__ import atbash, atbash_slow''' print(F'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=UpperCamelCase__ )} seconds''' ) print(F'''> atbash(): {timeit("atbash(printable)" , setup=UpperCamelCase__ )} seconds''' ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(F"{example} encrypted in atbash: {atbash(example)}") benchmark()
352
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase : List[Any] = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } UpperCAmelCase : Union[str, Any] = { "allenai/led-base-16384": 1_6384, } class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["input_ids", "attention_mask"] def __init__( self : Dict , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[Any]="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""")) lowercase_ = add_prefix_space lowercase_ = pre_tok_class(**lowerCAmelCase_) lowercase_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase_ = """post_processor""" lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) if tokenizer_component_instance: lowercase_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ = tuple(state["""sep"""]) if "cls" in state: lowercase_ = tuple(state["""cls"""]) lowercase_ = False if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = add_prefix_space lowercase_ = True if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets: lowercase_ = trim_offsets lowercase_ = True if changes_to_apply: lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type""")) lowercase_ = component_class(**lowerCAmelCase_) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCAmelCase ( self : List[str]): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""") return None return str(self._mask_token) @mask_token.setter def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str): """simple docstring""" lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else value lowercase_ = value def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any]): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None): """simple docstring""" lowercase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_) return tuple(lowerCAmelCase_) def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None): """simple docstring""" lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None): """simple docstring""" lowercase_ = [self.sep_token_id] lowercase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ): """simple docstring""" lowercase_ = super()._pad( encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) # Load from model defaults if return_attention_mask is None: lowercase_ = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase_ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_) if needs_to_be_padded: lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""]) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase_ = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": lowercase_ = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side)) return encoded_inputs
313
0
def selection_sort(collection: list) -> list:
    """Sort a list in place with selection sort and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
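# A compact variant of the same algorithm (a sketch, not part of the original
# file): pick the index of the minimum with min() and a key function.
def selection_sort_min(collection: list) -> list:
    for i in range(len(collection) - 1):
        least = min(range(i, len(collection)), key=collection.__getitem__)
        collection[i], collection[least] = collection[least], collection[i]
    return collection


assert selection_sort_min([3, 1, 2]) == [1, 2, 3]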
88
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A ( unittest.TestCase ): def _A (self ): __lowercase= logging.get_logger() # the current default level is logging.WARNING __lowercase= logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) def _A (self ): __lowercase= logging.get_verbosity() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase ) __lowercase= logging.log_levels[env_level_str] __lowercase= logging.get_verbosity() self.assertEqual( lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level __lowercase= '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __lowercase= logging.logging.getLogger() with CaptureLogger(lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def _A (self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , '' ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
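# Usage sketch of the verbosity API exercised by the tests above (these
# helpers exist in transformers.utils.logging):
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_error()    # only errors are emitted
hf_logging.set_verbosity_warning()  # restore the library default
hf_logging.disable_progress_bar()   # hide tqdm bars, e.g. during downloads
hf_logging.enable_progress_bar()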
295
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = {"""vocab_file""": """sentencepiece.model"""} __A = { """vocab_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""", }, } __A = { """google/rembert""": 256, } class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Optional[Any] = VOCAB_FILES_NAMES __magic_name__ :Any = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) lowerCAmelCase__ :int = do_lower_case lowerCAmelCase__ :Optional[int] = remove_space lowerCAmelCase__ :Union[str, Any] = keep_accents lowerCAmelCase__ :int = vocab_file lowerCAmelCase__ :List[str] = spm.SentencePieceProcessor() self.sp_model.Load(__UpperCAmelCase ) @property def snake_case ( self ): '''simple docstring''' return len(self.sp_model ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self.__dict__.copy() lowerCAmelCase__ :Any = None return state def __setstate__( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = d lowerCAmelCase__ :Optional[int] = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.sp_model.EncodeAsPieces(__UpperCAmelCase ) return pieces def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.sp_model.PieceToId(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.sp_model.IdToPiece(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.sp_model.decode_pieces(__UpperCAmelCase ) return out_string def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = [self.sep_token_id] lowerCAmelCase__ :int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Any = [self.sep_token_id] lowerCAmelCase__ :List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error('Vocabulary path ({}) should be a directory'.format(__UpperCAmelCase ) ) return lowerCAmelCase__ :int = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
254
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def __A (_SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" lowerCAmelCase__ :Dict = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: lowerCAmelCase__ :Tuple = [144, 192, 240] lowerCAmelCase__ :List[str] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: lowerCAmelCase__ :List[str] = [96, 120, 144] lowerCAmelCase__ :List[Any] = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: lowerCAmelCase__ :Union[str, Any] = [64, 80, 96] lowerCAmelCase__ :int = [16, 16, 24, 48, 64, 80, 320] lowerCAmelCase__ :Optional[Any] = 0.0_5 lowerCAmelCase__ :Tuple = 2.0 if mobilevit_name.startswith('deeplabv3_' ): lowerCAmelCase__ :int = 512 lowerCAmelCase__ :Optional[Any] = 16 lowerCAmelCase__ :int = 21 lowerCAmelCase__ :Tuple = 'pascal-voc-id2label.json' else: lowerCAmelCase__ :int = 1000 lowerCAmelCase__ :Optional[Any] = 'imagenet-1k-id2label.json' lowerCAmelCase__ :Optional[int] = 'huggingface/label-files' lowerCAmelCase__ :List[str] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowerCAmelCase__ :List[Any] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCAmelCase__ :Dict = idalabel lowerCAmelCase__ :List[Any] = {v: k for k, v in idalabel.items()} return config def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Optional[int]: """simple docstring""" for i in range(1 , 6 ): if F"layer_{i}." in name: lowerCAmelCase__ :List[Any] = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." ) if "conv_1." in name: lowerCAmelCase__ :Dict = name.replace('conv_1.' , 'conv_stem.' ) if ".block." in name: lowerCAmelCase__ :List[str] = name.replace('.block.' , '.' ) if "exp_1x1" in name: lowerCAmelCase__ :str = name.replace('exp_1x1' , 'expand_1x1' ) if "red_1x1" in name: lowerCAmelCase__ :Any = name.replace('red_1x1' , 'reduce_1x1' ) if ".local_rep.conv_3x3." in name: lowerCAmelCase__ :List[Any] = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' ) if ".local_rep.conv_1x1." in name: lowerCAmelCase__ :Any = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' ) if ".norm." in name: lowerCAmelCase__ :Union[str, Any] = name.replace('.norm.' , '.normalization.' ) if ".conv." in name: lowerCAmelCase__ :List[str] = name.replace('.conv.' , '.convolution.' ) if ".conv_proj." in name: lowerCAmelCase__ :List[str] = name.replace('.conv_proj.' , '.conv_projection.' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F".{i}.{j}." in name: lowerCAmelCase__ :Optional[int] = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F".{i}.{j}." in name: lowerCAmelCase__ :Dict = name.replace(F".{i}.{j}." , F".{i}." 
) if "expand_1x1" in name: lowerCAmelCase__ :Dict = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' ) if "conv_3x3" in name: lowerCAmelCase__ :Optional[int] = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' ) if "reduce_1x1" in name: lowerCAmelCase__ :Optional[Any] = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' ) for i in range(2 , 5 ): if F".global_rep.{i}.weight" in name: lowerCAmelCase__ :Tuple = name.replace(F".global_rep.{i}.weight" , '.layernorm.weight' ) if F".global_rep.{i}.bias" in name: lowerCAmelCase__ :Any = name.replace(F".global_rep.{i}.bias" , '.layernorm.bias' ) if ".global_rep." in name: lowerCAmelCase__ :List[str] = name.replace('.global_rep.' , '.transformer.' ) if ".pre_norm_mha.0." in name: lowerCAmelCase__ :int = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' ) if ".pre_norm_mha.1.out_proj." in name: lowerCAmelCase__ :Any = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' ) if ".pre_norm_ffn.0." in name: lowerCAmelCase__ :Optional[Any] = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' ) if ".pre_norm_ffn.1." in name: lowerCAmelCase__ :Optional[Any] = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' ) if ".pre_norm_ffn.4." in name: lowerCAmelCase__ :Tuple = name.replace('.pre_norm_ffn.4.' , '.output.dense.' ) if ".transformer." in name: lowerCAmelCase__ :Any = name.replace('.transformer.' , '.transformer.layer.' ) if ".aspp_layer." in name: lowerCAmelCase__ :str = name.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in name: lowerCAmelCase__ :Optional[int] = name.replace('.aspp_pool.' , '.' ) if "seg_head." in name: lowerCAmelCase__ :Optional[int] = name.replace('seg_head.' , 'segmentation_head.' ) if "segmentation_head.classifier.classifier." in name: lowerCAmelCase__ :int = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' ) if "classifier.fc." in name: lowerCAmelCase__ :Union[str, Any] = name.replace('classifier.fc.' , 'classifier.' ) elif (not base_model) and ("segmentation_head." not in name): lowerCAmelCase__ :Optional[Any] = 'mobilevit.' + name return name def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Union[str, Any]: """simple docstring""" if base_model: lowerCAmelCase__ :Union[str, Any] = '' else: lowerCAmelCase__ :Tuple = 'mobilevit.' for key in orig_state_dict.copy().keys(): lowerCAmelCase__ :Union[str, Any] = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key[:8] == "encoder.": lowerCAmelCase__ :Optional[int] = key[8:] if "qkv" in key: lowerCAmelCase__ :Tuple = key.split('.' ) lowerCAmelCase__ :List[Any] = int(key_split[0][6:] ) - 1 lowerCAmelCase__ :Any = int(key_split[3] ) lowerCAmelCase__ :Optional[int] = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" ) lowerCAmelCase__ :Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size lowerCAmelCase__ :List[Any] = ( F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention." 
) if "weight" in key: lowerCAmelCase__ :str = val[:dim, :] lowerCAmelCase__ :int = val[dim : dim * 2, :] lowerCAmelCase__ :Optional[int] = val[-dim:, :] else: lowerCAmelCase__ :Union[str, Any] = val[:dim] lowerCAmelCase__ :Any = val[dim : dim * 2] lowerCAmelCase__ :Tuple = val[-dim:] else: lowerCAmelCase__ :List[str] = val return orig_state_dict def __A () ->str: """simple docstring""" lowerCAmelCase__ :Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase__ :Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->List[Any]: """simple docstring""" lowerCAmelCase__ :int = get_mobilevit_config(_SCREAMING_SNAKE_CASE ) # load original state_dict lowerCAmelCase__ :Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) # load 🤗 model if mobilevit_name.startswith('deeplabv3_' ): lowerCAmelCase__ :Optional[int] = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval() else: lowerCAmelCase__ :Optional[int] = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowerCAmelCase__ :Tuple = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCAmelCase__ :List[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowerCAmelCase__ :Optional[int] = image_processor(images=prepare_img() , return_tensors='pt' ) lowerCAmelCase__ :List[Any] = model(**_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Tuple = outputs.logits if mobilevit_name.startswith('deeplabv3_' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": lowerCAmelCase__ :Optional[int] = torch.tensor( [ [[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]], [[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]], [[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": lowerCAmelCase__ :Dict = torch.tensor( [ [[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]], [[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]], [[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": lowerCAmelCase__ :Tuple = torch.tensor( [ [[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]], [[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]], [[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]], ] ) else: raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" ) assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": lowerCAmelCase__ :Union[str, Any] = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] ) elif mobilevit_name == "mobilevit_xs": lowerCAmelCase__ :Any = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] ) 
elif mobilevit_name == "mobilevit_xxs": lowerCAmelCase__ :str = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ) else: raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" ) assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: lowerCAmelCase__ :Dict = { 'mobilevit_s': 'mobilevit-small', 'mobilevit_xs': 'mobilevit-x-small', 'mobilevit_xxs': 'mobilevit-xx-small', 'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small', 'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small', 'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small', } print('Pushing to the hub...' ) lowerCAmelCase__ :str = model_mapping[mobilevit_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization='apple' ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='apple' ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __A = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
254
1
"""Schur complement of a block matrix [[A, B], [B^T, C]]: S = C - B^T A^{-1} B."""
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
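# Quick numeric check (a sketch, not part of the original file) of the identity
# det([[A, B], [B^T, C]]) = det(A) * det(S) that the test above relies on.
import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[5.0]])
s = c - b.T @ np.linalg.inv(a) @ b          # Schur complement of A
x = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(x), np.linalg.det(a) * np.linalg.det(s))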
47
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase : List[str] = logging.get_logger(__name__) lowerCamelCase : List[Any] = { "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json", } class A__ ( A__ , A__ ): A__ = 'convnextv2' def __init__( self : Tuple , _a : Optional[int]=3 , _a : Any=4 , _a : int=4 , _a : Union[str, Any]=None , _a : List[str]=None , _a : Optional[Any]="gelu" , _a : Any=0.02 , _a : Any=1e-12 , _a : Tuple=0.0 , _a : int=224 , _a : Any=None , _a : Optional[int]=None , **_a : List[str] , ) -> Optional[Any]: '''simple docstring''' super().__init__(**_a ) _SCREAMING_SNAKE_CASE =num_channels _SCREAMING_SNAKE_CASE =patch_size _SCREAMING_SNAKE_CASE =num_stages _SCREAMING_SNAKE_CASE =[96, 192, 384, 768] if hidden_sizes is None else hidden_sizes _SCREAMING_SNAKE_CASE =[3, 3, 9, 3] if depths is None else depths _SCREAMING_SNAKE_CASE =hidden_act _SCREAMING_SNAKE_CASE =initializer_range _SCREAMING_SNAKE_CASE =layer_norm_eps _SCREAMING_SNAKE_CASE =drop_path_rate _SCREAMING_SNAKE_CASE =image_size _SCREAMING_SNAKE_CASE =['stem'] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =get_aligned_output_features_output_indices( out_features=_a , out_indices=_a , stage_names=self.stage_names )
47
1
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
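# Equivalent closed form (a sketch, not part of the original file): row n of
# Pascal's triangle is the binomial coefficients C(n, k) for k = 0..n.
from math import comb


def pascal_row(n: int) -> list[int]:
    return [comb(n, k) for k in range(n + 1)]


assert pascal_row(4) == [1, 4, 6, 4, 1]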
18
"""Malus's law: intensity of polarized light after an analyser."""
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Return I = I0 * cos^2(theta), with theta in degrees."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
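# Worked example (a sketch, not part of the original file): at theta = 60
# degrees, cos^2(60) = 0.25, so 100.0 units of intensity drop to 25.0.
assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9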
18
1
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar _A = TypeVar("""_T""") class lowerCamelCase ( Generic[_T] ): '''simple docstring''' def __init__(self , _lowerCamelCase = None ): """simple docstring""" UpperCAmelCase__ : list[_T] = list(iterable or [] ) UpperCAmelCase__ : list[_T] = [] def __len__(self ): """simple docstring""" return len(self._stacka ) + len(self._stacka ) def __repr__(self ): """simple docstring""" return F"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def _a (self , _lowerCamelCase ): """simple docstring""" self._stacka.append(SCREAMING_SNAKE_CASE__ ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Dict = self._stacka.pop UpperCAmelCase__ : List[Any] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("""Queue is empty""" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
171
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase ) ->List[int]: """simple docstring""" if isinstance(_UpperCamelCase, np.ndarray ): return list(tensor.shape ) lowercase : Optional[Any] = tf.shape(_UpperCamelCase ) if tensor.shape == tf.TensorShape(_UpperCamelCase ): return dynamic lowercase : Tuple = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )] def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase : int = [1] * inputs.shape.rank lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis] lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase ) lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase ) # Compute layer normalization using the batch_normalization # function. lowercase : List[str] = tf.nn.batch_normalization( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, ) return outputs def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase : Dict = tf.shape(_UpperCamelCase ) lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 ) return tf.reshape(_UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->tf.Tensor: """simple docstring""" if not isinstance(_UpperCamelCase, tf.Tensor ): lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase : str = ( tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None: """simple docstring""" tf.debugging.assert_less( _UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ), ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : List[Any] = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) lowercase : Any = np.asarray(_UpperCamelCase ) lowercase : List[Any] = 1 lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_UpperCamelCase ): lowercase : Optional[int] = chunk_data else: lowercase : int = data def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" if name in group.attrs: lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]] else: lowercase : Optional[Any] = [] lowercase : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def __lowercase ( _UpperCamelCase ) ->List[str]: """simple docstring""" def _expand_single_ad_tensor(_UpperCamelCase ): if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_UpperCamelCase, axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
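# Usage sketch (assumes the `transformers` library, where these helpers live
# in transformers.tf_utils as `shape_list` and `stable_softmax`):
import tensorflow as tf
from transformers.tf_utils import shape_list, stable_softmax

x = tf.zeros((2, 5, 8))
assert shape_list(x) == [2, 5, 8]  # static dims come back as plain ints
probs = stable_softmax(tf.random.normal((2, 3)), axis=-1)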
337
0
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three FIFO queues, one per priority level (0 is the highest)."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """A single list; dequeue removes and returns the smallest element."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    for _ in range(5):
        print(fpq.dequeue())
    print(fpq)
    # only 9 items were enqueued, so the 10th dequeue raises UnderFlowError
    for _ in range(5):
        print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    for value in (10, 70, 100, 1, 5, 7, 4, 64, 128):
        epq.enqueue(value)
    print(epq)
    for _ in range(5):
        print(epq.dequeue())
    print(epq)
    # only 9 items were enqueued, so the 10th dequeue raises UnderFlowError
    for _ in range(5):
        print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
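# For comparison (a sketch, not part of the original file): the standard
# library's heapq gives O(log n) enqueue/dequeue instead of the O(n)
# min-plus-remove scan used by ElementPriorityQueue above.
import heapq

heap: list[int] = []
for value in (10, 70, 1, 5):
    heapq.heappush(heap, value)
assert heapq.heappop(heap) == 1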
350
import copy import re class _SCREAMING_SNAKE_CASE : lowerCAmelCase__ = 'hp' lowerCAmelCase__ = {} lowerCAmelCase__ = None @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase , lowercase ) -> Tuple: lowerCamelCase_ = prefix lowerCamelCase_ = defaults cls.build_naming_info() @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Optional[Any]: if len(lowercase ) == 0: return "" lowerCamelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowercase ) + 1 ): lowerCamelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: lowerCamelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowercase ): lowerCamelCase_ = "" while integer != 0: lowerCamelCase_ = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s lowerCamelCase_ = 0 while True: lowerCamelCase_ = word + "#" + int_to_alphabetic(lowercase ) if sword in info["reverse_short_word"]: continue else: lowerCamelCase_ = sword break lowerCamelCase_ = short_word lowerCamelCase_ = word return short_word @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> int: lowerCamelCase_ = param_name.split("_" ) lowerCamelCase_ = [TrialShortNamer.shortname_for_word(lowercase , lowercase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name lowerCamelCase_ = ["", "_"] for separator in separators: lowerCamelCase_ = separator.join(lowercase ) if shortname not in info["reverse_short_param"]: lowerCamelCase_ = shortname lowerCamelCase_ = param_name return shortname return param_name @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Optional[Any]: lowerCamelCase_ = TrialShortNamer.shortname_for_key(lowercase , lowercase ) lowerCamelCase_ = short_name lowerCamelCase_ = param_name @classmethod def SCREAMING_SNAKE_CASE_( cls ) -> Dict: if cls.NAMING_INFO is not None: return lowerCamelCase_ = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } lowerCamelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowercase , lowercase ) lowerCamelCase_ = info @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase ) -> Optional[int]: cls.build_naming_info() assert cls.PREFIX is not None lowerCamelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue lowerCamelCase_ = cls.NAMING_INFO["short_param"][k] if isinstance(lowercase , lowercase ): lowerCamelCase_ = 1 if v else 0 lowerCamelCase_ = "" if isinstance(lowercase , (int, float) ) else "-" lowerCamelCase_ = f'{key}{sep}{v}' name.append(lowercase ) return "_".join(lowercase ) @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase ) -> List[Any]: lowerCamelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": lowerCamelCase_ = [] else: lowerCamelCase_ = repr.split("_" ) lowerCamelCase_ = {} for value in values: if "-" in value: lowerCamelCase_ , lowerCamelCase_ = value.split("-" ) else: lowerCamelCase_ = re.sub("[0-9.]" , "" , lowercase ) lowerCamelCase_ = float(re.sub("[^0-9.]" , "" , lowercase ) ) lowerCamelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k] 
lowerCamelCase_ = p_v for k in cls.DEFAULTS: if k not in parameters: lowerCamelCase_ = cls.DEFAULTS[k] return parameters
47
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Tuple = { '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = """wav2vec2""" def __init__( self , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE="group" , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) , __SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) , __SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0_5 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=320 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="sum" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1500) , __SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) , __SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) ->Any: super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = hidden_size lowerCAmelCase = feat_extract_norm lowerCAmelCase = feat_extract_activation lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = conv_bias lowerCAmelCase = num_conv_pos_embeddings lowerCAmelCase = num_conv_pos_embedding_groups lowerCAmelCase = len(self.conv_dim ) lowerCAmelCase = num_hidden_layers lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = feat_proj_dropout lowerCAmelCase = final_dropout lowerCAmelCase = layerdrop lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range lowerCAmelCase = vocab_size lowerCAmelCase = do_stable_layer_norm lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration 
for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase = apply_spec_augment lowerCAmelCase = mask_time_prob lowerCAmelCase = mask_time_length lowerCAmelCase = mask_time_min_masks lowerCAmelCase = mask_feature_prob lowerCAmelCase = mask_feature_length lowerCAmelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCAmelCase = num_codevectors_per_group lowerCAmelCase = num_codevector_groups lowerCAmelCase = contrastive_logits_temperature lowerCAmelCase = feat_quantizer_dropout lowerCAmelCase = num_negatives lowerCAmelCase = codevector_dim lowerCAmelCase = proj_codevector_dim lowerCAmelCase = diversity_loss_weight # ctc loss lowerCAmelCase = ctc_loss_reduction lowerCAmelCase = ctc_zero_infinity # adapter lowerCAmelCase = add_adapter lowerCAmelCase = adapter_kernel_size lowerCAmelCase = adapter_stride lowerCAmelCase = num_adapter_layers lowerCAmelCase = output_hidden_size or hidden_size lowerCAmelCase = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = xvector_output_dim @property def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
338
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,) UpperCAmelCase_ : int = (("""num_inference_steps""", 25),) def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str: lowerCAmelCase = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**__SCREAMING_SNAKE_CASE ) return config def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase , lowerCAmelCase = sample, sample for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample 
        lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
        assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
        if scheduler is None:
            lowerCAmelCase = self.scheduler_classes[0]
            lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = 10
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )

        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample

        return sample

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = dict(self.forward_default_kwargs )
        lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )

        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase = self.get_scheduler_config()
            lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

            lowerCAmelCase = self.dummy_sample
            lowerCAmelCase = 0.1 * sample

            if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
            elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                lowerCAmelCase = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]

            lowerCAmelCase = scheduler.timesteps[5]
            lowerCAmelCase = scheduler.timesteps[6]

            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample

            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
        lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3

        lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )

        lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
                        lowerCAmelCase = self.full_loop(
                            solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
                        assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
        self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = self.full_loop()
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = 10
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )

        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample

        assert sample.dtype == torch.floataa
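# Hedged side-note (not part of the test file above): the round-trip the tests exercise
# relies on diffusers scheduler configs being interchangeable across multistep solvers.
# A minimal sketch, assuming a recent `diffusers` install:
from diffusers import DEISMultistepScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler

deis = DEISMultistepScheduler(num_train_timesteps=1000)
dpm = DPMSolverMultistepScheduler.from_config(deis.config)    # same config, different solver
unipc = UniPCMultistepScheduler.from_config(dpm.config)
roundtrip = DEISMultistepScheduler.from_config(unipc.config)  # back to DEIS, as in the test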
338
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

__SCREAMING_SNAKE_CASE = {
    """facebook/dpr-ctx_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-ctx_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-multiset-base""": (
        """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
    ),
}


class lowerCamelCase_ ( _A ):
    '''simple docstring'''

    a__ = "dpr"

    def __init__( self : Optional[int] , __lowerCamelCase : List[str]=3_05_22 , __lowerCamelCase : Dict=7_68 , __lowerCamelCase : str=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : List[Any]=30_72 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=5_12 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Union[str, Any]=1e-12 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Optional[Any]="absolute" , __lowerCamelCase : int = 0 , **__lowerCamelCase : int , ) -> List[Any]:
        super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
        A : List[Any] = vocab_size
        A : Any = hidden_size
        A : List[str] = num_hidden_layers
        A : List[str] = num_attention_heads
        A : int = hidden_act
        A : Optional[int] = intermediate_size
        A : int = hidden_dropout_prob
        A : List[Any] = attention_probs_dropout_prob
        A : Union[str, Any] = max_position_embeddings
        A : int = type_vocab_size
        A : Optional[int] = initializer_range
        A : Tuple = layer_norm_eps
        A : int = projection_dim
        A : Optional[Any] = position_embedding_type
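# Hedged usage sketch against the upstream `transformers` DPR API that this
# (name-mangled) file mirrors; the mangled class above is not directly usable as written.
from transformers import DPRConfig, DPRContextEncoder

dpr_config = DPRConfig(projection_dim=128)  # projection_dim=0 (the default) keeps the 768-d pooled output
dpr_encoder = DPRContextEncoder(dpr_config)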
256
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

__SCREAMING_SNAKE_CASE = 637_8137.0
__SCREAMING_SNAKE_CASE = 635_6752.31_4245
__SCREAMING_SNAKE_CASE = 6378137


def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
    A : List[Any] = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    A : Tuple = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
    A : Tuple = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    A : List[str] = haversine_distance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    A : List[Any] = (b_lata + b_lata) / 2
    A : Optional[Any] = (b_lata - b_lata) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    A : List[str] = (sin(_lowerCamelCase ) ** 2) * (cos(_lowerCamelCase ) ** 2)
    A : Optional[int] = cos(sigma / 2 ) ** 2
    A : int = (sigma - sin(_lowerCamelCase )) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    A : List[str] = (cos(_lowerCamelCase ) ** 2) * (sin(_lowerCamelCase ) ** 2)
    A : Union[str, Any] = sin(sigma / 2 ) ** 2
    A : int = (sigma + sin(_lowerCamelCase )) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
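# Hedged usage sketch: upstream this function is `lamberts_ellipsoidal_distance` and
# returns metres. As written above, the mangled local names keep the body from running,
# so only the intended call shape is shown; coordinates are illustrative
# (San Francisco -> New York, roughly 4.1e6 m).
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
distance_m = UpperCAmelCase(*SAN_FRANCISCO, *NEW_YORK)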
256
1
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> List[str]:
    '''simple docstring'''
    UpperCamelCase = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("""cls_token""", """vit.embeddings.cls_token"""),
            ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
            ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
            ("""pos_embed""", """vit.embeddings.position_embeddings"""),
        ] )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("""norm.weight""", """layernorm.weight"""),
                ("""norm.bias""", """layernorm.bias"""),
                ("""pre_logits.fc.weight""", """pooler.dense.weight"""),
                ("""pre_logits.fc.bias""", """pooler.dense.bias"""),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        UpperCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("""norm.weight""", """vit.layernorm.weight"""),
                ("""norm.bias""", """vit.layernorm.bias"""),
                ("""head.weight""", """classifier.weight"""),
                ("""head.bias""", """classifier.bias"""),
            ] )

    return rename_keys


def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ) -> str:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            UpperCamelCase = ''''''
        else:
            UpperCamelCase = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        UpperCamelCase = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        UpperCamelCase = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCamelCase = in_proj_weight[
            : config.hidden_size, :
        ]
        UpperCamelCase = in_proj_bias[: config.hidden_size]
        UpperCamelCase = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        UpperCamelCase = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        UpperCamelCase = in_proj_weight[
            -config.hidden_size :, :
        ]
        UpperCamelCase = in_proj_bias[-config.hidden_size :]


def lowercase( UpperCamelCase_ ) -> List[str]:
    '''simple docstring'''
    UpperCamelCase = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(lowercase__ , lowercase__ )


def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
    '''simple docstring'''
    UpperCamelCase = dct.pop(lowercase__ )
    UpperCamelCase = val


def lowercase( ) -> str:
    '''simple docstring'''
    UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    UpperCamelCase = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
    return im


@torch.no_grad()
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
    '''simple docstring'''
    UpperCamelCase = ViTConfig()
    UpperCamelCase = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        UpperCamelCase = True
        UpperCamelCase = int(vit_name[-12:-10] )
        UpperCamelCase = int(vit_name[-9:-6] )
    else:
        UpperCamelCase = 1000
        UpperCamelCase = '''huggingface/label-files'''
        UpperCamelCase = '''imagenet-1k-id2label.json'''
        UpperCamelCase = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
        UpperCamelCase = {int(lowercase__ ): v for k, v in idalabel.items()}
        UpperCamelCase = idalabel
        UpperCamelCase = {v: k for k, v in idalabel.items()}
        UpperCamelCase = int(vit_name[-6:-4] )
        UpperCamelCase = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("""tiny""" ):
            UpperCamelCase = 192
            UpperCamelCase = 768
            UpperCamelCase = 12
            UpperCamelCase = 3
        elif vit_name[9:].startswith("""small""" ):
            UpperCamelCase = 384
            UpperCamelCase = 1536
            UpperCamelCase = 12
            UpperCamelCase = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("""small""" ):
            UpperCamelCase = 768
            UpperCamelCase = 2304
            UpperCamelCase = 8
            UpperCamelCase = 8
        elif vit_name[4:].startswith("""base""" ):
            pass
        elif vit_name[4:].startswith("""large""" ):
            UpperCamelCase = 1024
            UpperCamelCase = 4096
            UpperCamelCase = 24
            UpperCamelCase = 16
        elif vit_name[4:].startswith("""huge""" ):
            UpperCamelCase = 1280
            UpperCamelCase = 5120
            UpperCamelCase = 32
            UpperCamelCase = 16

    # load original model from timm
    UpperCamelCase = timm.create_model(lowercase__ , pretrained=lowercase__ )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    UpperCamelCase = timm_model.state_dict()
    if base_model:
        remove_classification_head_(lowercase__ )
    UpperCamelCase = create_rename_keys(lowercase__ , lowercase__ )
    for src, dest in rename_keys:
        rename_key(lowercase__ , lowercase__ , lowercase__ )
    read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        UpperCamelCase = ViTModel(lowercase__ ).eval()
    else:
        UpperCamelCase = ViTForImageClassification(lowercase__ ).eval()
    model.load_state_dict(lowercase__ )

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        UpperCamelCase = DeiTImageProcessor(size=config.image_size )
    else:
        UpperCamelCase = ViTImageProcessor(size=config.image_size )
    UpperCamelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
    UpperCamelCase = encoding['''pixel_values''']
    UpperCamelCase = model(lowercase__ )

    if base_model:
        UpperCamelCase = timm_model.forward_features(lowercase__ )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(lowercase__ , outputs.pooler_output , atol=1E-3 )
    else:
        UpperCamelCase = timm_model(lowercase__ )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )

    Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
    print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowercase__ )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(lowercase__ )


if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--vit_name""",
        default="""vit_base_patch16_224""",
        type=str,
        help="""Name of the ViT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )

    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
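# Hedged invocation sketch for the conversion script above (the script file name is an
# assumption; the flags match the argparse definition):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224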
343
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


_UpperCamelCase = logging.get_logger(__name__)


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    def constraint_to_multiple_of(lowercase__ , lowercase__ , lowercase__=0 , lowercase__=None ):
        __lowerCAmelCase : int = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            __lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple
        if x < min_val:
            __lowerCAmelCase : Any = math.ceil(val / multiple ) * multiple
        return x

    __lowerCAmelCase : Dict = (output_size, output_size) if isinstance(lowercase__ , lowercase__ ) else output_size
    __lowerCAmelCase, __lowerCAmelCase : Optional[Any] = get_image_size(lowercase__ )
    __lowerCAmelCase, __lowerCAmelCase : int = output_size

    # determine new height and width
    __lowerCAmelCase : Optional[Any] = output_height / input_height
    __lowerCAmelCase : List[Any] = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            __lowerCAmelCase : str = scale_width
        else:
            # fit height
            __lowerCAmelCase : str = scale_height

    __lowerCAmelCase : Any = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase__ )
    __lowerCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase__ )

    return (new_height, new_width)


class __lowercase (_UpperCAmelCase ):
    _UpperCamelCase = ["""pixel_values"""]

    def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = False , A_ = 1 , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) ->None:
        '''simple docstring'''
        super().__init__(**A_ )
        __lowerCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384}
        __lowerCAmelCase : Dict = get_size_dict(A_ )
        __lowerCAmelCase : Optional[Any] = do_resize
        __lowerCAmelCase : int = size
        __lowerCAmelCase : Dict = keep_aspect_ratio
        __lowerCAmelCase : List[Any] = ensure_multiple_of
        __lowerCAmelCase : Tuple = resample
        __lowerCAmelCase : Dict = do_rescale
        __lowerCAmelCase : Any = rescale_factor
        __lowerCAmelCase : List[Any] = do_normalize
        __lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCamelCase__ ( self , A_ , A_ , A_ = False , A_ = 1 , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) ->np.ndarray:
        '''simple docstring'''
        __lowerCAmelCase : int = get_size_dict(A_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        __lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(
            A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , )
        return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ = None , **A_ , ) ->Dict:
        '''simple docstring'''
        return rescale(A_ , scale=A_ , data_format=A_ , **A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ = None , **A_ , ) ->np.ndarray:
        '''simple docstring'''
        return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )

    def UpperCamelCase__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) ->PIL.Image.Image:
        '''simple docstring'''
        __lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
        __lowerCAmelCase : Optional[int] = size if size is not None else self.size
        __lowerCAmelCase : Union[str, Any] = get_size_dict(A_ )
        __lowerCAmelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        __lowerCAmelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        __lowerCAmelCase : Tuple = resample if resample is not None else self.resample
        __lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
        __lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
        __lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std

        __lowerCAmelCase : Optional[Any] = make_list_of_images(A_ )

        if not valid_images(A_ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )

        # All transformations expect numpy arrays.
        __lowerCAmelCase : Any = [to_numpy_array(A_ ) for image in images]

        if do_resize:
            __lowerCAmelCase : Optional[Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]

        if do_rescale:
            __lowerCAmelCase : Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]

        if do_normalize:
            __lowerCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]

        __lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]

        __lowerCAmelCase : Dict = {'''pixel_values''': images}
        return BatchFeature(data=A_ , tensor_type=A_ )

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->Any:
        '''simple docstring'''
        __lowerCAmelCase : Any = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(A_ ) != len(A_ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )

            if is_torch_tensor(A_ ):
                __lowerCAmelCase : Optional[int] = target_sizes.numpy()

            __lowerCAmelCase : List[str] = []
            for idx in range(len(A_ ) ):
                __lowerCAmelCase : Any = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
                __lowerCAmelCase : str = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(A_ )
        else:
            __lowerCAmelCase : Any = logits.argmax(dim=1 )
            __lowerCAmelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]

        return semantic_segmentation
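# Hedged usage sketch against the upstream `transformers` DPT image processor that this
# (name-mangled) file mirrors:
import numpy as np
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32)
dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)
batch = processor(images=dummy_image, return_tensors="np")
print(batch["pixel_values"].shape)  # resized sides end up as multiples of 32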
275
0
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """simple docstring"""

    @property
    def __lowercase ( self) -> str:
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def __lowercase ( self) -> List[str]:
        '''simple docstring'''
        a__ : List[str] = ort.SessionOptions()
        a__ : Any = False
        return options

    def __lowercase ( self) -> Tuple:
        '''simple docstring'''
        a__ : Optional[int] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        a__ : Any = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        a__ : Dict = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')

        # using the PNDM scheduler by default
        a__ : List[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowercase)

        a__ : Tuple = 'A red cat sitting on a park bench'

        a__ : Union[str, Any] = np.random.RandomState(0)
        a__ : str = pipe(
            prompt=lowercase , image=lowercase , mask_image=lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowercase , output_type='np' , )

        a__ : Optional[int] = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
352
import enum
import shutil
import sys

lowercase , lowercase : List[Any] = shutil.get_terminal_size()
lowercase : Union[str, Any] = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}


class A__ ( enum.Enum ):
    """simple docstring"""

    __A : List[str] = 0
    __A : str = 1


def A_ ( A__ , A__="" ) -> int:
    sys.stdout.write(str(A__ ) + end )
    sys.stdout.flush()


def A_ ( A__ , A__ , A__="" ) -> int:
    forceWrite(F'\u001b[{color}m{content}\u001b[0m' , A__ )


def A_ ( ) -> Any:
    forceWrite('\r' )


def A_ ( A__ , A__ ) -> List[str]:
    forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )


def A_ ( ) -> Any:
    forceWrite(' ' * TERMINAL_WIDTH )
    reset_cursor()


def A_ ( ) -> Any:
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH )
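# Hedged sketch of the ANSI escape sequences the helpers above emit (the repeated
# mangled name `A_` shadows earlier definitions, so the raw escapes are shown instead):
import sys

sys.stdout.write("\u001b[32mgreen text\u001b[0m\n")  # colored write, then reset
sys.stdout.write("progress: 10%")
sys.stdout.write("\r")       # carriage return: jump back to the start of the line
sys.stdout.write("\033[1A")  # move the cursor up one line
sys.stdout.flush()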
225
0
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """google/pix2struct-textcaps-base""": ( """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : str = "pix2struct_text_model" UpperCAmelCase : Tuple = ["past_key_values"] UpperCAmelCase : List[Any] = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Optional[Any] , __UpperCAmelCase : Any=50244 , __UpperCAmelCase : Optional[Any]=768 , __UpperCAmelCase : Union[str, Any]=64 , __UpperCAmelCase : Tuple=2048 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=12 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Optional[int]=128 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Optional[int]=1e-6 , __UpperCAmelCase : Optional[int]=1.0 , __UpperCAmelCase : Tuple="gelu_new" , __UpperCAmelCase : Tuple=0 , __UpperCAmelCase : Any=False , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : List[Any]=1 , __UpperCAmelCase : Dict=False , __UpperCAmelCase : List[Any]=True , **__UpperCAmelCase : List[Any] , ): a : Dict = vocab_size a : Any = hidden_size a : List[str] = d_kv a : Any = d_ff a : Any = num_layers a : Optional[Any] = num_heads a : Any = relative_attention_num_buckets a : Dict = relative_attention_max_distance a : List[Any] = dropout_rate a : Dict = layer_norm_epsilon a : Optional[Any] = initializer_factor a : Tuple = use_cache a : int = eos_token_id a : Optional[Any] = decoder_start_token_id # for backwards compatibility a : int = dense_act_fn super().__init__( pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , is_decoder=_lowerCAmelCase , **_lowerCAmelCase , ) @classmethod def __snake_case ( cls : Optional[Any] , __UpperCAmelCase : List[Any] , **__UpperCAmelCase : int): cls._set_token_in_kwargs(_lowerCAmelCase) a , a : int = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type") == "pix2struct": a : Optional[Any] = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''') return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase) class _A ( _a ): """simple docstring""" UpperCAmelCase : Tuple = "pix2struct_vision_model" def __init__( self : Tuple , __UpperCAmelCase : List[Any]=768 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Tuple=2048 , __UpperCAmelCase : int=64 , __UpperCAmelCase : int=12 , __UpperCAmelCase : Optional[Any]=12 , __UpperCAmelCase : Union[str, Any]="gelu_new" , __UpperCAmelCase : str=1e-6 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Optional[Any]=1e-10 , __UpperCAmelCase : List[str]=1.0 , __UpperCAmelCase : Optional[Any]=4096 , __UpperCAmelCase : Union[str, Any]=32 , __UpperCAmelCase : str=128 , **__UpperCAmelCase : List[str] , ): super().__init__(**_lowerCAmelCase) a : Optional[Any] = hidden_size a : List[Any] = patch_embed_hidden_size a : Optional[Any] = d_ff a : List[Any] = dropout_rate a : Union[str, Any] = num_hidden_layers a : Tuple = num_attention_heads a : List[str] = initializer_range a : List[str] = initializer_factor a : int = attention_dropout a : int = layer_norm_eps a : List[str] = dense_act_fn a : List[Any] = seq_len a : int = relative_attention_num_buckets a : List[Any] = relative_attention_max_distance a : Union[str, Any] = d_kv @classmethod def __snake_case ( cls : Optional[int] , __UpperCAmelCase : List[str] , **__UpperCAmelCase : Tuple): cls._set_token_in_kwargs(_lowerCAmelCase) a , a : int = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("model_type") == "pix2struct": a : List[Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''') return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase) class _A ( _a ): """simple docstring""" UpperCAmelCase : int = "pix2struct" UpperCAmelCase : Dict = True def __init__( self : List[str] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str=1.0 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : List[str]=True , **__UpperCAmelCase : str , ): super().__init__(tie_word_embeddings=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase) if text_config is None: a : Optional[Any] = {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.") if vision_config is None: a : int = {} logger.info("vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.") a : List[str] = PixaStructTextConfig(**_lowerCAmelCase) a : int = PixaStructVisionConfig(**_lowerCAmelCase) a : List[str] = self.text_config.decoder_start_token_id a : Union[str, Any] = self.text_config.pad_token_id a : Tuple = self.text_config.eos_token_id a : Dict = initializer_factor a : int = initializer_range a : Union[str, Any] = self.initializer_range a : Dict = self.initializer_range a : Dict = is_vqa @classmethod def __snake_case ( cls : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , **__UpperCAmelCase : int): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase) def __snake_case ( self : List[str]): a : str = copy.deepcopy(self.__dict__) a : Optional[int] = self.text_config.to_dict() a : List[str] = self.vision_config.to_dict() a : Union[str, Any] = self.__class__.model_type return output
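# Hedged usage sketch against the upstream `transformers` Pix2Struct API mirrored above:
# composing a full config from separate text and vision sub-configs.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(num_layers=4)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=4)
combined = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)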
40
'''simple docstring'''
from __future__ import annotations


def __a(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0" )
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * daily_interest_rate * days_between_payments


def __a(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ):
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0" )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1
    )


def __a(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ):
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0" )
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return compound_interest(
        SCREAMING_SNAKE_CASE_ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
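# Quick usage sketch for the three helpers above. They all share the mangled name `__a`
# (upstream: simple_interest, compound_interest, apr_interest), so the arithmetic is
# restated inline. Values are illustrative:
principal = 10_000.0
simple = principal * 0.0005 * 30               # simple daily interest -> 150.0
compound = principal * ((1 + 0.05) ** 10 - 1)  # compound interest -> ~6288.95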
158
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_lowerCamelCase : Union[str, Any] = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Optional[int] = ["MobileViTFeatureExtractor"]
    _lowerCamelCase : List[str] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Optional[int] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Dict = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
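# Hedged side-note: the _LazyModule pattern above defers heavy imports until attribute
# access. A minimal sketch of the effect (the module path is the upstream one, an assumption):
import importlib

mobilevit = importlib.import_module("transformers.models.mobilevit")
config_cls = getattr(mobilevit, "MobileViTConfig")  # the real import happens lazily here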
249
'''simple docstring'''
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[int]:
    """simple docstring"""
    UpperCamelCase = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')

    UpperCamelCase = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )

    if not os.path.isdir(A__ ):
        os.makedirs(A__ )

    UpperCamelCase = model.state_dict()

    def to_tf_var_name(A__ ):
        for patt, repl in iter(A__ ):
            UpperCamelCase = name.replace(A__ , A__ )
        return F"""bert/{name}"""

    def create_tf_var(A__ , A__ , A__ ):
        UpperCamelCase = tf.dtypes.as_dtype(tensor.dtype )
        UpperCamelCase = tf.get_variable(dtype=A__ , shape=tensor.shape , name=A__ , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(A__ )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            UpperCamelCase = to_tf_var_name(A__ )
            UpperCamelCase = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                UpperCamelCase = torch_tensor.T
            UpperCamelCase = create_tf_var(tensor=A__ , name=A__ , session=A__ )
            tf.keras.backend.set_value(A__ , A__ )
            UpperCamelCase = session.run(A__ )
            print(F"""Successfully created {tf_name}: {np.allclose(A__ , A__ )}""" )

        UpperCamelCase = tf.train.Saver(tf.trainable_variables() )
        saver.save(A__ , os.path.join(A__ , model_name.replace('-' , '_' ) + '.ckpt' ) )


def __lowerCamelCase ( A__=None ) -> Union[str, Any]:
    """simple docstring"""
    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=A__ , required=A__ , help='model name e.g. bert-base-uncased' )
    parser.add_argument(
        '--cache_dir' , type=A__ , default=A__ , required=A__ , help='Directory containing pytorch model' )
    parser.add_argument('--pytorch_model_path' , type=A__ , required=A__ , help='/path/to/<pytorch-model-name>.bin' )
    parser.add_argument('--tf_cache_dir' , type=A__ , required=A__ , help='Directory in which to save tensorflow model' )
    UpperCamelCase = parser.parse_args(A__ )

    UpperCamelCase = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )

    convert_pytorch_checkpoint_to_tf(model=A__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )


if __name__ == "__main__":
    main()
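# Hedged invocation sketch for the conversion script above (the script file name is an
# assumption; the flags match the argparse definition):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin \
#       --tf_cache_dir /path/to/tf_checkpoints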
249
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig snake_case_ = [ """openmmlab/upernet-convnext-tiny""", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring snake_case_ = """UperNetConfig""" class A_ ( nn.Module ): """simple docstring""" def __init__( self :Tuple , lowercase_ :int , lowercase_ :int , lowercase_ :Union[int, Tuple[int, int]] , lowercase_ :Union[int, Tuple[int, int], str] = 0 , lowercase_ :bool = False , lowercase_ :Union[int, Tuple[int, int]] = 1 , ) -> None: super().__init__() UpperCAmelCase = nn.Convad( in_channels=lowercase_ , out_channels=lowercase_ , kernel_size=lowercase_ , padding=lowercase_ , bias=lowercase_ , dilation=lowercase_ , ) UpperCAmelCase = nn.BatchNormad(lowercase_ ) UpperCAmelCase = nn.ReLU() def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :torch.Tensor ) -> torch.Tensor: UpperCAmelCase = self.conv(lowercase_ ) UpperCAmelCase = self.batch_norm(lowercase_ ) UpperCAmelCase = self.activation(lowercase_ ) return output class A_ ( nn.Module ): """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :int , lowercase_ :int , lowercase_ :int ) -> None: super().__init__() UpperCAmelCase = [ nn.AdaptiveAvgPoolad(lowercase_ ), UperNetConvModule(lowercase_ , lowercase_ , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(lowercase_ ) , lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :torch.Tensor ) -> torch.Tensor: UpperCAmelCase = input for layer in self.layers: UpperCAmelCase = layer(lowercase_ ) return hidden_state class A_ ( nn.Module ): """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :Tuple[int, ...] , lowercase_ :int , lowercase_ :int , lowercase_ :bool ) -> None: super().__init__() UpperCAmelCase = pool_scales UpperCAmelCase = align_corners UpperCAmelCase = in_channels UpperCAmelCase = channels UpperCAmelCase = [] for i, pool_scale in enumerate(lowercase_ ): UpperCAmelCase = UperNetPyramidPoolingBlock(pool_scale=lowercase_ , in_channels=lowercase_ , channels=lowercase_ ) self.blocks.append(lowercase_ ) self.add_module(str(lowercase_ ) , lowercase_ ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :torch.Tensor ) -> List[torch.Tensor]: UpperCAmelCase = [] for ppm in self.blocks: UpperCAmelCase = ppm(lowercase_ ) UpperCAmelCase = nn.functional.interpolate( lowercase_ , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners ) ppm_outs.append(lowercase_ ) return ppm_outs class A_ ( nn.Module ): """simple docstring""" def __init__( self :Dict , lowercase_ :Optional[Any] , lowercase_ :Optional[int] ) -> Any: super().__init__() UpperCAmelCase = config UpperCAmelCase = config.pool_scales # e.g. 
(1, 2, 3, 6) UpperCAmelCase = in_channels UpperCAmelCase = config.hidden_size UpperCAmelCase = False UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module UpperCAmelCase = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) UpperCAmelCase = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module UpperCAmelCase = nn.ModuleList() UpperCAmelCase = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer UpperCAmelCase = UperNetConvModule(lowercase_ , self.channels , kernel_size=1 ) UpperCAmelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(lowercase_ ) self.fpn_convs.append(lowercase_ ) UpperCAmelCase = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def UpperCAmelCase__ ( self :Optional[Any] ) -> List[Any]: self.apply(self._init_weights ) def UpperCAmelCase__ ( self :str , lowercase_ :Union[str, Any] ) -> str: if isinstance(lowercase_ , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCAmelCase__ ( self :Dict , lowercase_ :int ) -> int: UpperCAmelCase = inputs[-1] UpperCAmelCase = [x] psp_outs.extend(self.psp_modules(lowercase_ ) ) UpperCAmelCase = torch.cat(lowercase_ , dim=1 ) UpperCAmelCase = self.bottleneck(lowercase_ ) return output def UpperCAmelCase__ ( self :str , lowercase_ :torch.Tensor ) -> torch.Tensor: # build laterals UpperCAmelCase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(lowercase_ ) ) # build top-down path UpperCAmelCase = len(lowercase_ ) for i in range(used_backbone_levels - 1 , 0 , -1 ): UpperCAmelCase = laterals[i - 1].shape[2:] UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=lowercase_ , mode='bilinear' , align_corners=self.align_corners ) # build outputs UpperCAmelCase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): UpperCAmelCase = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners ) UpperCAmelCase = torch.cat(lowercase_ , dim=1 ) UpperCAmelCase = self.fpn_bottleneck(lowercase_ ) UpperCAmelCase = self.classifier(lowercase_ ) return output class A_ ( nn.Module ): """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :Optional[Any] , lowercase_ :int = 2 , lowercase_ :int = 3 , lowercase_ :Union[int, Tuple[int, int]] = 1 ) -> None: super().__init__() UpperCAmelCase = config UpperCAmelCase = config.auxiliary_in_channels UpperCAmelCase = config.auxiliary_channels UpperCAmelCase = config.auxiliary_num_convs UpperCAmelCase = config.auxiliary_concat_input UpperCAmelCase = in_index UpperCAmelCase = (kernel_size // 2) * dilation UpperCAmelCase = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_ ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_ ) ) if self.num_convs == 0: UpperCAmelCase = nn.Identity() else: 
UpperCAmelCase = nn.Sequential(*lowercase_ ) if self.concat_input: UpperCAmelCase = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=lowercase_ , padding=kernel_size // 2 ) UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def UpperCAmelCase__ ( self :List[str] ) -> Dict: self.apply(self._init_weights ) def UpperCAmelCase__ ( self :int , lowercase_ :Any ) -> List[Any]: if isinstance(lowercase_ , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCAmelCase__ ( self :Dict , lowercase_ :torch.Tensor ) -> torch.Tensor: # just take the relevant feature maps UpperCAmelCase = encoder_hidden_states[self.in_index] UpperCAmelCase = self.convs(lowercase_ ) if self.concat_input: UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) UpperCAmelCase = self.classifier(lowercase_ ) return output class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = UperNetConfig __UpperCamelCase = """pixel_values""" __UpperCamelCase = True def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[str] ) -> Union[str, Any]: if isinstance(lowercase_ , lowercase_ ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def UpperCAmelCase__ ( self :int ) -> Tuple: self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def UpperCAmelCase__ ( self :Any , lowercase_ :Dict , lowercase_ :Union[str, Any]=False ) -> Optional[int]: if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = value snake_case_ = R""" Parameters: This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ snake_case_ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( """UperNet framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.""" , SCREAMING_SNAKE_CASE_ , ) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :int , lowercase_ :Tuple ) -> int: super().__init__(lowercase_ ) UpperCAmelCase = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) UpperCAmelCase = UperNetHead(lowercase_ , in_channels=self.backbone.channels ) UpperCAmelCase = UperNetFCNHead(lowercase_ ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) ) @replace_return_docstrings(output_type=lowercase_ , config_class=_CONFIG_FOR_DOC ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]: UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions UpperCAmelCase = self.backbone.forward_with_filtered_kwargs( lowercase_ , output_hidden_states=lowercase_ , output_attentions=lowercase_ ) UpperCAmelCase = outputs.feature_maps UpperCAmelCase = self.decode_head(lowercase_ ) UpperCAmelCase = nn.functional.interpolate(lowercase_ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=lowercase_ ) UpperCAmelCase = None if self.auxiliary_head is not None: UpperCAmelCase = self.auxiliary_head(lowercase_ ) UpperCAmelCase = nn.functional.interpolate( lowercase_ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=lowercase_ ) UpperCAmelCase = None if labels is not None: if self.config.num_labels == 1: raise ValueError('The number of labels should be greater than one' ) else: # compute weighted loss UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) UpperCAmelCase = loss_fct(lowercase_ , lowercase_ ) UpperCAmelCase = loss_fct(lowercase_ , lowercase_ ) UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: UpperCAmelCase = (logits,) + outputs[1:] else: UpperCAmelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
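# Hedged usage sketch against the upstream `transformers` UperNet API that this
# (name-mangled) file mirrors; the default backbone choice in UperNetConfig is an assumption.
import torch
from transformers import UperNetConfig, UperNetForSemanticSegmentation

seg_config = UperNetConfig()
seg_model = UperNetForSemanticSegmentation(seg_config)
with torch.no_grad():
    seg_logits = seg_model(torch.randn(1, 3, 224, 224)).logits  # (batch, num_labels, H, W)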
78
"""simple docstring""" def __magic_name__ ( lowercase , lowercase ): if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) SCREAMING_SNAKE_CASE_: Optional[int] =str(bin(lowercase ) )[2:] # remove the leading "0b" SCREAMING_SNAKE_CASE_: Any =str(bin(lowercase ) )[2:] SCREAMING_SNAKE_CASE_: Dict =max(len(lowercase ) , len(lowercase ) ) return "0b" + "".join( str(int("""1""" in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
173
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class lowerCAmelCase__ : """simple docstring""" lowerCAmelCase__ = MBartConfig lowerCAmelCase__ = {} lowerCAmelCase__ = "gelu" def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int=13 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : List[str]=99 , __SCREAMING_SNAKE_CASE : List[str]=32 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=20 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=1 , __SCREAMING_SNAKE_CASE : List[str]=0 , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() 
__SCREAMING_SNAKE_CASE = inputs_dict["""input_ids"""] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["""attention_mask"""][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["""head_mask"""] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def a__ ( a__ , a__ , a__ , a__=None , a__=None , a__=None , a__=None , a__=None , ): """simple docstring""" if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(a__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ = ( { "conversational": TFMBartForConditionalGeneration, "feature-extraction": TFMBartModel, "summarization": TFMBartForConditionalGeneration, "text2text-generation": TFMBartForConditionalGeneration, "translation": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Dict ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_tf class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = [ " UN Chief Says There Is No Military Solution in Syria", ] lowerCAmelCase__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", ] lowerCAmelCase__ = "facebook/mbart-large-en-ro" @cached_property def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCAmelCase__ ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def UpperCAmelCase__ ( self : List[str] , **__SCREAMING_SNAKE_CASE : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__SCREAMING_SNAKE_CASE ) self.assertListEqual(self.expected_text , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) return generated_words @slow def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" self._assert_generated_batch_equal_expected()
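# Illustrative sketch of the attention-mask construction from prepare_mbart_inputs_dict
# above (assumes tensorflow is installed; pad_token_id=1 matches the tester's default).
import tensorflow as tf

example_ids = tf.constant([[5, 7, 9, 1, 1]])  # trailing 1s are pad tokens
example_mask = tf.cast(tf.math.not_equal(example_ids, 1), tf.int8)
print(example_mask.numpy())  # [[1 1 1 0 0]]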
331
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "markuplm" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=30_522 , __SCREAMING_SNAKE_CASE : Optional[Any]=768 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : str=3_072 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_024 , __SCREAMING_SNAKE_CASE : Dict=216 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_001 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : str=50 , __SCREAMING_SNAKE_CASE : int="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple: """simple docstring""" super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout # additional properties __SCREAMING_SNAKE_CASE = max_depth __SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings __SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings __SCREAMING_SNAKE_CASE = tag_pad_id __SCREAMING_SNAKE_CASE = subs_pad_id __SCREAMING_SNAKE_CASE = xpath_unit_hidden_size
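# Minimal usage sketch for the config above (assumes transformers is installed):
# PretrainedConfig subclasses round-trip cleanly through plain dicts.
from transformers import MarkupLMConfig

cfg = MarkupLMConfig(hidden_size=256, max_depth=32)
restored = MarkupLMConfig.from_dict(cfg.to_dict())
assert restored.hidden_size == 256 and restored.max_depth == 32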
331
1
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase__ = 2_5_0_0_0_4 lowerCAmelCase__ = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = MBartaaTokenizer __lowerCamelCase = MBartaaTokenizerFast __lowerCamelCase = True __lowerCamelCase = True def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = MBartaaTokenizer(lowercase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = "<s>" A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(lowercase ) , 1054 ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = MBartaaTokenizer(lowercase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=lowercase ) A__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) self.assertListEqual( lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" 
, ) def UpperCamelCase ( self ) -> str: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return A__ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A__ = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) A__ = self.tokenizer_class.from_pretrained(lowercase , **lowercase ) A__ = tempfile.mkdtemp() A__ = tokenizer_r.save_pretrained(lowercase ) A__ = tokenizer_p.save_pretrained(lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) A__ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(lowercase , lowercase ) # Checks everything loads correctly in the same way A__ = tokenizer_r.from_pretrained(lowercase ) A__ = tokenizer_p.from_pretrained(lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase , lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowercase ) # Save tokenizer rust, legacy_format=True A__ = tempfile.mkdtemp() A__ = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase ) A__ = tokenizer_p.save_pretrained(lowercase ) # Checks it save with the same files self.assertSequenceEqual(lowercase , lowercase ) # Checks everything loads correctly in the same way A__ = tokenizer_r.from_pretrained(lowercase ) A__ = tokenizer_p.from_pretrained(lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase , lowercase ) ) shutil.rmtree(lowercase ) # Save tokenizer rust, legacy_format=False A__ = tempfile.mkdtemp() A__ = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase ) A__ = tokenizer_p.save_pretrained(lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A__ = tokenizer_r.from_pretrained(lowercase ) A__ = tokenizer_p.from_pretrained(lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase , lowercase ) ) shutil.rmtree(lowercase ) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): """simple docstring""" __lowerCamelCase = 'facebook/mbart-large-50-one-to-many-mmt' __lowerCamelCase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] __lowerCamelCase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de 
oameni.', ] __lowerCamelCase = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2] @classmethod def UpperCamelCase ( cls ) -> Tuple: '''simple docstring''' A__ = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) A__ = 1 return cls def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowercase ) def UpperCamelCase ( self ) -> int: '''simple docstring''' self.assertIn(lowercase , self.tokenizer.all_special_ids ) A__ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] A__ = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase ) A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase ) self.assertEqual(lowercase , lowercase ) self.assertNotIn(self.tokenizer.eos_token , lowercase ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , lowercase ) A__ = 10 A__ = self.tokenizer(lowercase , max_length=lowercase , truncation=lowercase ).input_ids[0] self.assertEqual(ids[0] , lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = tempfile.mkdtemp() A__ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase ) A__ = MBartaaTokenizer.from_pretrained(lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase , return_tensors="pt" ) A__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) A__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) A__ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def UpperCamelCase ( self ) -> Union[str, Any]: 
'''simple docstring''' A__ = self.tokenizer(self.src_text , padding=lowercase , truncation=lowercase , max_length=3 , return_tensors="pt" ) A__ = self.tokenizer( text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=10 , return_tensors="pt" ) A__ = targets["input_ids"] A__ = shift_tokens_right(lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(lowercase ) , { # en_XX, A, test, EOS "input_ids": [[250004, 62, 3034, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
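# Simplified single-sequence sketch (not the library function) of the
# shift_tokens_right helper exercised above: the last non-pad token (the eos here)
# moves to position 0 and the rest shifts right by one, matching the
# [2, RO_CODE] assertion in the tests; positions after eos are pad-masked anyway.
def shift_right_one_seq(labels, pad_token_id):
    last_non_pad = max(i for i, t in enumerate(labels) if t != pad_token_id)
    return [labels[last_non_pad]] + labels[:-1]

print(shift_right_one_seq([250020, 884, 9019, 2, 1, 1], pad_token_id=1))
# [2, 250020, 884, 9019, 2, 1]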
68
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline __SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[str] = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->Optional[Any]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 32 @property def __lowerCAmelCase ( self ) ->str: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Dict: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Tuple: return 100 @property def __lowerCAmelCase ( self ) ->int: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->Any: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : str = self.dummy_unet SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq SCREAMING_SNAKE_CASE : List[str] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } 
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int: SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) # create hint SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) ) 
SCREAMING_SNAKE_CASE : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0 SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo''' SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : List[str] = pipeline( image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
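# Minimal sketch of the HWC->NCHW hint preparation used above (assumes torch and
# numpy; the random array stands in for np.array(pil_image) / 255.0).
import numpy as np
import torch

hwc = np.random.rand(64, 64, 3).astype(np.float32)
hint = torch.from_numpy(hwc).permute(2, 0, 1).unsqueeze(0)
print(hint.shape)  # torch.Size([1, 3, 64, 64])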
313
0
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] ) SCREAMING_SNAKE_CASE_ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] ) SCREAMING_SNAKE_CASE_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] ) @require_multi_gpu def lowerCAmelCase_ ( self : Tuple ): print(F"Found {torch.cuda.device_count()} devices." ) SCREAMING_SNAKE_CASE_ = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self : Optional[Any] ): print(F"Found {torch.cuda.device_count()} devices." ) SCREAMING_SNAKE_CASE_ = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(F"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE_ = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self : str ): print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" ) SCREAMING_SNAKE_CASE_ = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ): execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": lowerCamelCase__ : int = Accelerator() lowerCamelCase__ : Dict = (accelerator.state.process_index + 2, 10) lowerCamelCase__ : Any = torch.randint(0, 10, shape).to(accelerator.device) lowerCamelCase__ : Union[str, Any] = '' lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowerCamelCase__ : List[str] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowerCamelCase__ : Union[str, Any] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
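# Local illustration of the padding semantics exercised above (assumes torch).
# accelerator.pad_across_processes pads dim 0 up to the max size across
# processes; here that is simulated for a single tensor and a known target length.
import torch

def pad_dim0(tensor, length, pad_first=False):
    pad = torch.zeros(length - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

t = torch.arange(6).reshape(3, 2)
print(pad_dim0(t, 5))                  # two zero rows appended
print(pad_dim0(t, 5, pad_first=True))  # two zero rows prepended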
210
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
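# Hypothetical subclass sketch (names are illustrative, not from the file):
# register_subcommand wires an argparse subcommand, run() executes it.
class EchoCommand(BaseTransformersCLICommand):
    def __init__(self, text):
        self.text = text

    @staticmethod
    def register_subcommand(subparsers):
        p = subparsers.add_parser("echo")
        p.add_argument("text")
        p.set_defaults(func=lambda args: EchoCommand(args.text))

    def run(self):
        print(self.text)


parser = ArgumentParser()
EchoCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["echo", "hello"])
args.func(args).run()  # prints "hello"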
210
1
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Any = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[int] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : Union[str, Any] = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : int = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Optional[int] = type_vocab_size __UpperCAmelCase : List[str] = type_sequence_label_size __UpperCAmelCase : str = initializer_range __UpperCAmelCase : str = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : Dict = scope def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Tuple = None if self.use_input_mask: __UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Optional[Any] = None if self.use_token_type_ids: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Tuple = None __UpperCAmelCase : List[str] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[int]: '''simple docstring''' return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=__UpperCAmelCase , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = OpenLlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Dict = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = True __UpperCAmelCase : List[str] = OpenLlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : str = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> int: '''simple docstring''' __UpperCAmelCase : int = True __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Any = OpenLlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : int = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , 
output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Dict = (OpenLlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Union[str, Any] = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : str = False _SCREAMING_SNAKE_CASE : Tuple = False def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = OpenLlamaModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = 3 __UpperCAmelCase : Tuple = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Any = OpenLlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[str]: '''simple 
docstring''' __UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : Dict = """single_label_classification""" __UpperCAmelCase : List[str] = input_dict["""input_ids"""] __UpperCAmelCase : List[str] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = OpenLlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : int = """multi_label_classification""" __UpperCAmelCase : Dict = input_dict["""input_ids"""] __UpperCAmelCase : Dict = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Tuple = OpenLlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> List[str]: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : str = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = OpenLlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : str = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : Tuple = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[int] = OpenLlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : Any = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
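# Simplified stand-in for the ids_tensor helper used throughout this tester
# (assumes torch; the real helper's signature differs slightly): random integer
# ids of a given shape, bounded by the vocab size.
import torch

def ids_tensor_sketch(shape, vocab_size, seed=0):
    gen = torch.Generator().manual_seed(seed)
    return torch.randint(0, vocab_size, shape, generator=gen)

print(ids_tensor_sketch((2, 5), vocab_size=99))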
254
'''simple docstring'''


def partition(m: int) -> int:
    """simple docstring"""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
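# Companion sketch for comparison: the textbook coin-change style DP for the
# partition function p(n); p(5) == 7 and p(10) == 42 are standard reference
# values (whether it agrees with the memoized variant above depends on that
# function's exact counting convention).
def p(n: int) -> int:
    ways = [1] + [0] * n
    for part in range(1, n + 1):
        for total in range(part, n + 1):
            ways[total] += ways[total - part]
    return ways[n]

print(p(5), p(10))  # 7 42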
254
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
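# Sketch of surfacing a deprecation warning like the one above in a test
# (generic stdlib pattern, not tied to this class):
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.warn("FlavaFeatureExtractor is deprecated", FutureWarning)
    assert issubclass(caught[-1].category, FutureWarning)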
364
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """simple docstring"""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """simple docstring"""
    return len(unique_prime_factors(num))


def equality(lst: list) -> bool:
    """simple docstring"""
    return len(set(lst)) in (0, 1)


def run(n: int) -> list:
    """simple docstring"""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """simple docstring"""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
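# Worked example for the helpers above: 644 = 2**2 * 7 * 23 has three distinct
# prime factors, and 644, 645, 646 is the classic run of three consecutive
# integers with three distinct prime factors each.
print(unique_prime_factors(644))  # {2, 7, 23}
print(upf_len(644))               # 3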
146
0
import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging __lowerCamelCase : List[Any] = logging.get_logger(__name__) class a__ ( A__ ): A = CLIPConfig A = ['CLIPEncoderLayer'] def __init__( self : Union[str, Any],_A : CLIPConfig ): """simple docstring""" super().__init__(_A ) SCREAMING_SNAKE_CASE_ : List[Any] = CLIPVisionModelWithProjection(config.vision_config ) SCREAMING_SNAKE_CASE_ : int = nn.Linear(config.vision_config.projection_dim,1 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Linear(config.vision_config.projection_dim,1 ) @torch.no_grad() def __UpperCamelCase ( self : Union[str, Any],_A : int,_A : Union[str, Any],_A : Optional[Any]=0.5,_A : Optional[Any]=0.5 ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.vision_model(_A )[0] SCREAMING_SNAKE_CASE_ : Tuple = self.p_head(_A ) SCREAMING_SNAKE_CASE_ : List[str] = nsfw_detected.flatten() SCREAMING_SNAKE_CASE_ : List[Any] = nsfw_detected > p_threshold SCREAMING_SNAKE_CASE_ : Any = nsfw_detected.tolist() if any(_A ): logger.warning( "Potential NSFW content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, nsfw_detected_ in enumerate(_A ): if nsfw_detected_: SCREAMING_SNAKE_CASE_ : Optional[int] = np.zeros(images[idx].shape ) SCREAMING_SNAKE_CASE_ : List[str] = self.w_head(_A ) SCREAMING_SNAKE_CASE_ : int = watermark_detected.flatten() SCREAMING_SNAKE_CASE_ : List[str] = watermark_detected > w_threshold SCREAMING_SNAKE_CASE_ : Optional[int] = watermark_detected.tolist() if any(_A ): logger.warning( "Potential watermarked content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, watermark_detected_ in enumerate(_A ): if watermark_detected_: SCREAMING_SNAKE_CASE_ : Optional[int] = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
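# Minimal sketch of the flatten-and-threshold step used above (assumes torch):
# per-image head outputs become boolean flags against the 0.5 default threshold.
import torch

scores = torch.tensor([[0.2], [0.7], [0.4]])
flags = (scores.flatten() > 0.5).tolist()
print(flags)  # [False, True, False]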
18
from functools import lru_cache


@lru_cache
def factorial(num: int):
    """simple docstring"""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
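# Quick check: 5! == 120, and lru_cache memoizes the intermediate calls.
print(factorial(5))                         # 120
print(factorial.cache_info().currsize > 0)  # True after the call above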
18
1
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case_ = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class SCREAMING_SNAKE_CASE__ : __lowerCamelCase : int = PegasusConfig __lowerCamelCase : Union[str, Any] = {} __lowerCamelCase : List[str] = """gelu""" def __init__( self , a , a=13 , a=7 , a=True , a=False , a=99 , a=32 , a=5 , a=4 , a=37 , a=0.1 , a=0.1 , a=20 , a=2 , a=1 , a=0 , ): lowercase__ : str = parent lowercase__ : Dict = batch_size lowercase__ : int = seq_length lowercase__ : str = is_training lowercase__ : List[Any] = use_labels lowercase__ : Optional[Any] = vocab_size lowercase__ : str = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : str = num_attention_heads lowercase__ : Union[str, Any] = intermediate_size lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : Dict = max_position_embeddings lowercase__ : List[str] = eos_token_id lowercase__ : List[str] = pad_token_id lowercase__ : List[str] = bos_token_id def snake_case_ ( self): lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size) lowercase__ : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1) lowercase__ : Optional[Any] = np.concatenate([input_ids, eos_tensor] , axis=1) lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__ : Tuple = prepare_pegasus_inputs_dict(__lowercase , __lowercase , __lowercase) return config, inputs_dict def snake_case_ ( self , a , a , a): lowercase__ : Union[str, Any] = 20 lowercase__ : List[str] = model_class_name(__lowercase) lowercase__ : str = model.encode(inputs_dict['input_ids']) lowercase__ : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowercase__ : str = model.init_cache(decoder_input_ids.shape[0] , __lowercase , __lowercase) lowercase__ : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4') lowercase__ : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase__ : int = model.decode( 
decoder_input_ids[:, :-1] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=__lowercase , decoder_position_ids=__lowercase , ) lowercase__ : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4') lowercase__ : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowercase , ) lowercase__ : Dict = model.decode(__lowercase , __lowercase) lowercase__ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""") def snake_case_ ( self , a , a , a): lowercase__ : Any = 20 lowercase__ : List[Any] = model_class_name(__lowercase) lowercase__ : List[Any] = model.encode(inputs_dict['input_ids']) lowercase__ : List[Any] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowercase__ : Dict = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) lowercase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , __lowercase , __lowercase) lowercase__ : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase__ : List[str] = model.decode( decoder_input_ids[:, :-1] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=__lowercase , decoder_position_ids=__lowercase , ) lowercase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4') lowercase__ : Optional[int] = model.decode( decoder_input_ids[:, -1:] , __lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowercase , decoder_position_ids=__lowercase , ) lowercase__ : Optional[int] = model.decode(__lowercase , __lowercase , decoder_attention_mask=__lowercase) lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""") def snake_case__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : int=None , ): '''simple docstring''' if attention_mask is None: lowercase__ : Tuple = np.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: lowercase__ : List[Any] = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class SCREAMING_SNAKE_CASE__ (UpperCAmelCase_ , unittest.TestCase ): __lowerCamelCase : List[str] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __lowerCamelCase : Optional[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __lowerCamelCase : List[Any] = True __lowerCamelCase : Optional[int] = False __lowerCamelCase : Tuple = False __lowerCamelCase : Union[str, Any] = False def snake_case_ ( self): lowercase__ : List[str] = FlaxPegasusModelTester(self) lowercase__ : Optional[Any] = ConfigTester(self , 
config_class=__lowercase) def snake_case_ ( self): self.config_tester.run_common_tests() def snake_case_ ( self): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowercase , __lowercase , __lowercase) def snake_case_ ( self): lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowercase , __lowercase , __lowercase) def snake_case_ ( self): lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): lowercase__ : int = self._prepare_for_class(__lowercase , __lowercase) lowercase__ : Union[str, Any] = model_class(__lowercase) @jax.jit def encode_jitted(a , a=None , **a): return model.encode(input_ids=__lowercase , attention_mask=__lowercase) with self.subTest('JIT Enabled'): lowercase__ : Tuple = encode_jitted(**__lowercase).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): lowercase__ : List[str] = encode_jitted(**__lowercase).to_tuple() self.assertEqual(len(__lowercase) , len(__lowercase)) for jitted_output, output in zip(__lowercase , __lowercase): self.assertEqual(jitted_output.shape , output.shape) def snake_case_ ( self): lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): lowercase__ : List[Any] = model_class(__lowercase) lowercase__ : List[Any] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask']) lowercase__ : str = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(a , a , a): return model.decode( decoder_input_ids=__lowercase , decoder_attention_mask=__lowercase , encoder_outputs=__lowercase , ) with self.subTest('JIT Enabled'): lowercase__ : Union[str, Any] = decode_jitted(**__lowercase).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): lowercase__ : Optional[int] = decode_jitted(**__lowercase).to_tuple() self.assertEqual(len(__lowercase) , len(__lowercase)) for jitted_output, output in zip(__lowercase , __lowercase): self.assertEqual(jitted_output.shape , output.shape) @slow def snake_case_ ( self): for model_class_name in self.all_model_classes: lowercase__ : int = model_class_name.from_pretrained('google/pegasus-large' , from_pt=__lowercase) lowercase__ : Union[str, Any] = np.ones((1, 1)) lowercase__ : Tuple = model(__lowercase) self.assertIsNotNone(__lowercase) @slow def snake_case_ ( self): lowercase__ : List[str] = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum') lowercase__ : List[Any] = PegasusTokenizer.from_pretrained('google/pegasus-xsum') lowercase__ : Optional[int] = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] lowercase__ : Tuple = [ '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''', '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''', ] lowercase__ : Optional[Any] = tokenizer(__lowercase , return_tensors='np' , truncation=__lowercase , max_length=512 , padding=__lowercase) lowercase__ : Union[str, Any] = model.generate(**__lowercase , num_beams=2).sequences lowercase__ : Any = tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase) assert tgt_text == decoded
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
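A quick sanity sketch of the config above; the override values are arbitrary, and the assertions rely only on the attribute_map declared in the class.

config = Speech2Text2Config(decoder_layers=4, d_model=128)  # arbitrary overrides

# attribute_map makes the generic names resolve to the decoder-specific fields
assert config.hidden_size == config.d_model == 128
assert config.num_attention_heads == config.decoder_attention_heads == 4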
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False  # assumption: the usual diffusers slow-test preamble


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to find each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion index of val within collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements one step right to make room, then insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
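A quick illustrative check of the sort with arbitrary inputs; binary search reduces the number of comparisons, but the shifting step keeps the worst case quadratic.

assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []      # empty input is a no-op
assert binary_insertion_sort([7]) == [7]    # a single element is already sorted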
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : str=0.999 ,lowerCamelCase : int="cosine" ,): if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCamelCase : Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCamelCase : List[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) _A : Tuple = [] for i in range(lowerCamelCase ): _A : List[Any] = i / num_diffusion_timesteps _A : List[str] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCamelCase ) / alpha_bar_fn(lowerCamelCase ) ,lowerCamelCase ) ) return torch.tensor(lowerCamelCase ,dtype=torch.floataa ) class __lowerCamelCase ( a_ , a_ ): """simple docstring""" a = [e.name for e in KarrasDiffusionSchedulers] a = 2 @register_to_config def __init__( self : int , SCREAMING_SNAKE_CASE : int = 1000 , SCREAMING_SNAKE_CASE : float = 0.0_0085 , SCREAMING_SNAKE_CASE : float = 0.012 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : str = "linspace" , SCREAMING_SNAKE_CASE : int = 0 , ): if trained_betas is not None: _A : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa) elif beta_schedule == "linear": _A : List[Any] = torch.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _A : Any = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _A : Optional[Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}') _A : Any = 1.0 - self.betas _A : List[Any] = torch.cumprod(self.alphas , dim=0) # set all values self.set_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any=None): if schedule_timesteps is None: _A : Dict = self.timesteps _A : List[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter) == 0: _A : Dict = 1 if len(SCREAMING_SNAKE_CASE) > 1 else 0 else: _A : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep _A : int = self._index_counter[timestep_int] return indices[pos].item() @property def A ( self : Optional[Any]): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def A ( self : List[Any] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ): _A : Tuple = self.index_for_timestep(SCREAMING_SNAKE_CASE) if self.state_in_first_order: _A : Any = self.sigmas[step_index] else: _A : int = self.sigmas_interpol[step_index] _A : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def A ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , ): _A : Optional[Any] = num_inference_steps _A : int = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _A : Tuple = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE)[::-1].copy() elif self.config.timestep_spacing == "leading": _A : Optional[Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _A : int = (np.arange(0 , SCREAMING_SNAKE_CASE) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _A : List[str] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _A : str = (np.arange(SCREAMING_SNAKE_CASE , 0 , -step_ratio)).round().copy().astype(SCREAMING_SNAKE_CASE) timesteps -= 1 else: raise ValueError( F'{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.') _A : List[str] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) _A : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE) _A : str = np.interp(SCREAMING_SNAKE_CASE , np.arange(0 , len(SCREAMING_SNAKE_CASE)) , SCREAMING_SNAKE_CASE) _A : str = np.concatenate([sigmas, [0.0]]).astype(np.floataa) _A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(device=SCREAMING_SNAKE_CASE) # interpolate sigmas _A : Optional[int] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp() _A : Any = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) _A : List[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) if str(SCREAMING_SNAKE_CASE).startswith('mps'): # mps does not support float64 _A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=torch.floataa) else: _A : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) # interpolate timesteps _A : Optional[int] = self.sigma_to_t(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=timesteps.dtype) _A : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten() _A : Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps]) _A : str = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _A : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE) def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]): # get log sigma _A : Dict = sigma.log() # get distribution _A : Any = log_sigma - self.log_sigmas[:, None] # get sigmas range _A : Tuple = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) _A : Union[str, Any] = low_idx + 1 _A : Dict = self.log_sigmas[low_idx] _A : List[Any] = self.log_sigmas[high_idx] # interpolate sigmas _A : Dict = (low - log_sigma) / (low - high) _A : Union[str, Any] = w.clamp(0 , 1) # transform interpolation to time range _A : int = (1 - w) * low_idx + w * high_idx _A : Any = t.view(sigma.shape) return t @property def A ( self : Any): return self.sample is None def A ( self : int , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : bool = True , ): _A : Optional[int] = self.index_for_timestep(SCREAMING_SNAKE_CASE) # advance index counter by 1 _A : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _A : Tuple = self.sigmas[step_index] _A : Dict = self.sigmas_interpol[step_index + 1] _A : Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _A : int = self.sigmas[step_index - 1] _A : Union[str, Any] = self.sigmas_interpol[step_index] _A : Dict = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _A : List[Any] = 0 _A : Dict = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _A : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol _A : Tuple = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _A : Any = sigma_hat if self.state_in_first_order else sigma_interpol _A : Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _A : int = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _A : str = sigma_interpol - sigma_hat # store for 2nd order step _A : List[str] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _A : List[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _A : Optional[int] = sigma_next - sigma_hat _A : str = self.sample _A : Any = None _A : Tuple = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE) def A ( self : Any , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples _A : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE): # mps does not support float64 _A : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa) _A : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa) else: _A : str = self.timesteps.to(original_samples.device) _A : str = timesteps.to(original_samples.device) _A : int = [self.index_for_timestep(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for t in timesteps] _A : Tuple = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): _A : List[Any] = sigma.unsqueeze(-1) _A : Dict = original_samples + noise * sigma return noisy_samples def __len__( self : List[Any]): return self.config.num_train_timesteps
"""simple docstring""" from collections.abc import Callable import numpy as np def lowercase ( a__ : Callable , a__ : float , a__ : float , a__ : float , a__ : float ) -> np.ndarray: _UpperCamelCase = int(np.ceil((x_end - xa) / step_size ) ) _UpperCamelCase = np.zeros((n + 1,) ) _UpperCamelCase = ya _UpperCamelCase = xa for k in range(a__ ): _UpperCamelCase = y[k] + step_size * ode_func(a__ , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : int ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> str: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : int , *__UpperCamelCase : Dict , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : str , *__UpperCamelCase : Dict , **__UpperCamelCase : List[Any] ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ) -> List[str]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Any , *__UpperCamelCase : int , **__UpperCamelCase : int ) -> Any: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ) -> List[str]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ) -> Tuple: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : Tuple , *__UpperCamelCase : Any , **__UpperCamelCase : List[Any] ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : int , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Dict ) -> int: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : str , *__UpperCamelCase : int , **__UpperCamelCase : Optional[int] ) -> Any: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : Tuple , *__UpperCamelCase : str , **__UpperCamelCase : Any ) -> Tuple: requires_backends(self , ['''flax'''] ) 
@classmethod def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Tuple , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : str , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : str ) -> Tuple: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : List[str] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ) -> Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Optional[Any] ) -> int: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : List[str] , **__UpperCamelCase : str ) -> List[str]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Tuple ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[Any] ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ) -> Any: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : Union[str, Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Optional[Any] ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Union[str, Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : str ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Dict ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : str , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Any ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : str , **__UpperCamelCase : Tuple ) -> List[str]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''flax'''] def __init__( self : Dict , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) -> Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ) -> Dict: requires_backends(cls , ['''flax'''] ) 
@classmethod def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> Tuple: requires_backends(cls , ['''flax'''] )
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor) for the given timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    emb = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(emb), jnp.sin(emb)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(emb), jnp.cos(emb)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
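A small shape check for the helper above: the sin and cos halves are concatenated on the last axis, so a 1-d batch of timesteps maps to (batch, embedding_dim).

ts = jnp.arange(4)                      # four integer timesteps
emb = get_sinusoidal_embeddings(ts, embedding_dim=32)
assert emb.shape == (4, 32)             # sin half plus cos half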
'''simple docstring''' import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures UpperCamelCase : List[str] = logging.get_logger(__name__) @dataclass class UpperCamelCase : """simple docstring""" A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} ) A : str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) A : int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A : bool = field( default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def SCREAMING_SNAKE_CASE_ ( self : str): """simple docstring""" a : Union[str, Any] = self.task_name.lower() class UpperCamelCase ( a_ ): """simple docstring""" A : int = "train" A : Tuple = "dev" A : List[Any] = "test" class UpperCamelCase ( a_ ): """simple docstring""" A : GlueDataTrainingArguments A : str A : List[InputFeatures] def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ): """simple docstring""" warnings.warn( 'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , ) a : Dict = args a : int = glue_processors[args.task_name]() a : int = glue_output_modes[args.task_name] if isinstance(UpperCAmelCase_ , UpperCAmelCase_): try: a : str = Split[mode] except KeyError: raise KeyError('mode is not a valid split name') # Load data features from cache or dataset file a : List[str] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) a : Tuple = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) a , a : str = label_list[2], label_list[1] a : int = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
a : Union[str, Any] = cached_features_file + '.lock' with FileLock(UpperCAmelCase_): if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache: a : Optional[Any] = time.time() a : Optional[Any] = torch.load(UpperCAmelCase_) logger.info( f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start) else: logger.info(f"""Creating features from dataset file at {args.data_dir}""") if mode == Split.dev: a : List[Any] = self.processor.get_dev_examples(args.data_dir) elif mode == Split.test: a : Optional[Any] = self.processor.get_test_examples(args.data_dir) else: a : List[str] = self.processor.get_train_examples(args.data_dir) if limit_length is not None: a : Dict = examples[:limit_length] a : List[Any] = glue_convert_examples_to_features( UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , ) a : Dict = time.time() torch.save(self.features , UpperCAmelCase_) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""") def __len__( self : Tuple): """simple docstring""" return len(self.features) def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]): """simple docstring""" return self.features[i] def SCREAMING_SNAKE_CASE_ ( self : str): """simple docstring""" return self.label_list
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
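The operations worked through on 0b1101 (decimal 13):

assert set_bit(0b1101, 1) == 0b1111     # 13 -> 15: bit 1 turned on
assert clear_bit(0b1101, 2) == 0b1001   # 13 -> 9: bit 2 turned off
assert flip_bit(0b1101, 0) == 0b1100    # 13 -> 12: bit 0 toggled
assert is_bit_set(0b1101, 3) is True    # the 8s bit is on
assert get_bit(0b1101, 1) == 0          # the 2s bit is off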
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCAmelCase_ : def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: return None class UpperCAmelCase_ : def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]: return None class UpperCAmelCase_ ( unittest.TestCase ): UpperCamelCase =[ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _lowerCamelCase ( self ) -> str: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCamelCase_ , '''tf''' , 12 , **UpperCamelCase_ ) @require_torch @slow def _lowerCamelCase ( self ) -> List[str]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCamelCase_ , '''pt''' , 12 , **UpperCamelCase_ ) @require_torch @slow def _lowerCamelCase ( self ) -> Any: from transformers import BertModel __lowercase : Optional[Any] = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words'''] with NamedTemporaryFile(mode='''w+t''' ) as vocab_file: vocab_file.write('''\n'''.join(UpperCamelCase_ ) ) vocab_file.flush() __lowercase : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: __lowercase : Tuple = BertModel(BertConfig(vocab_size=len(UpperCamelCase_ ) ) ) model.save_pretrained(UpperCamelCase_ ) self._test_export(UpperCamelCase_ , '''pt''' , 12 , UpperCamelCase_ ) @require_tf @slow def _lowerCamelCase ( self ) -> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: __lowercase : Any = self._test_export(UpperCamelCase_ , '''tf''' , 12 , **UpperCamelCase_ ) __lowercase : Tuple = quantize(Path(UpperCamelCase_ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) @require_torch @slow def _lowerCamelCase ( self ) -> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: __lowercase : List[str] = self._test_export(UpperCamelCase_ , '''pt''' , 12 , **UpperCamelCase_ ) __lowercase : Dict = quantize(UpperCamelCase_ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , **UpperCamelCase_ ) -> Dict: try: # Compute path with TemporaryDirectory() as tempdir: __lowercase : Union[str, Any] = Path(UpperCamelCase_ ).joinpath('''model.onnx''' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) return path except Exception as e: self.fail(UpperCamelCase_ ) @require_torch @require_tokenizers @slow def _lowerCamelCase ( self ) -> Tuple: from 
transformers import BertModel __lowercase : Optional[int] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) __lowercase : Union[str, Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(UpperCamelCase_ , UpperCamelCase_ , '''pt''' ) @require_tf @require_tokenizers @slow def _lowerCamelCase ( self ) -> List[str]: from transformers import TFBertModel __lowercase : Tuple = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) __lowercase : Any = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(UpperCamelCase_ , UpperCamelCase_ , '''tf''' ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: __lowercase : Dict = FeatureExtractionPipeline(UpperCamelCase_ , UpperCamelCase_ ) __lowercase : Dict = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1'''] __lowercase ,__lowercase ,__lowercase ,__lowercase : Any = infer_shapes(UpperCamelCase_ , UpperCamelCase_ ) # Assert all variables are present self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , UpperCamelCase_ ) self.assertSequenceEqual(variable_names[3:] , UpperCamelCase_ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} ) self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} ) def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Dict = ['''input_ids''', '''attention_mask''', '''token_type_ids'''] __lowercase : str = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]} __lowercase ,__lowercase : Optional[int] = ensure_valid_input(FuncContiguousArgs() , UpperCamelCase_ , UpperCamelCase_ ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(UpperCamelCase_ ) , 3 ) # Should have exactly the same input names self.assertEqual(set(UpperCamelCase_ ) , set(UpperCamelCase_ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(UpperCamelCase_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) __lowercase ,__lowercase : str = ensure_valid_input(FuncNonContiguousArgs() , UpperCamelCase_ , UpperCamelCase_ ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(UpperCamelCase_ ) , 1 ) self.assertEqual(len(UpperCamelCase_ ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['''input_ids'''] ) self.assertEqual(ordered_input_names[0] , '''input_ids''' ) def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Any = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' ) self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
"""simple docstring""" import math def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if initial_intensity < 0: raise ValueError('''The value of intensity cannot be negative''' ) # handling of negative values of initial intensity if angle < 0 or angle > 3_60: raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name='malus_law')
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowerCamelCase__ = ["bert-base-uncased", "bert-base-cased"] lowerCamelCase__ = "hf-internal-testing/tiny-bert-tf-only" if is_tf_available(): class __SCREAMING_SNAKE_CASE ( tf.keras.Model ): '''simple docstring''' def __init__( self : Union[str, Any] , __a : Optional[Any] ) -> Any: super().__init__() _UpperCamelCase : Tuple = tokenizer _UpperCamelCase : Tuple = AutoConfig.from_pretrained(__a ) _UpperCamelCase : Optional[int] = TFAutoModel.from_config(__a ) def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[Any] ) -> Optional[Any]: _UpperCamelCase : List[Any] = self.tokenizer(__a ) _UpperCamelCase : List[Any] = self.bert(**__a ) return out["pooler_output"] @require_tf @require_tensorflow_text class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: super().setUp() _UpperCamelCase : Dict = [ BertTokenizer.from_pretrained(__a ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false _UpperCamelCase : Tuple = [TFBertTokenizer.from_pretrained(__a ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(__a , use_fast_bert_tokenizer=__a ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) _UpperCamelCase : Optional[int] = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] _UpperCamelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): _UpperCamelCase : Tuple = tokenizer(__a , return_tensors="tf" , padding="longest" ) _UpperCamelCase : int = tf_tokenizer(__a ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict: for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Any = tf_tokenizer(self.paired_sentences ) _UpperCamelCase : Dict = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Optional[int] = tf.function(__a ) for test_inputs in (self.test_sentences, self.paired_sentences): _UpperCamelCase : Any = tf.constant(__a ) _UpperCamelCase 
: Tuple = compiled_tokenizer(__a ) _UpperCamelCase : Union[str, Any] = tf_tokenizer(__a ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Dict = ModelToSave(tokenizer=__a ) _UpperCamelCase : List[str] = tf.convert_to_tensor(self.test_sentences ) _UpperCamelCase : Any = model(__a ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _UpperCamelCase : Union[str, Any] = Path(__a ) / "saved.model" model.save(__a ) _UpperCamelCase : Union[str, Any] = tf.keras.models.load_model(__a ) _UpperCamelCase : Union[str, Any] = loaded_model(__a ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ) if weight_type is not None: _UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape else: _UpperCamelCase : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _UpperCamelCase : Optional[Any] = value elif weight_type == "weight_g": _UpperCamelCase : int = value elif weight_type == "weight_v": _UpperCamelCase : Optional[Any] = value elif weight_type == "bias": _UpperCamelCase : int = value else: _UpperCamelCase : Any = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]: """simple docstring""" _UpperCamelCase : List[str] = [] _UpperCamelCase : Any = fairseq_model.state_dict() _UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase : List[str] = False if "conv_layers" in name: load_conv_layer( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,) _UpperCamelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): _UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _UpperCamelCase : Any = True if "*" in mapped_key: _UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." 
)[-2] _UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ ) if "weight_g" in name: _UpperCamelCase : str = "weight_g" elif "weight_v" in name: _UpperCamelCase : Any = "weight_v" elif "weight" in name: _UpperCamelCase : List[str] = "weight" elif "bias" in name: _UpperCamelCase : List[Any] = "bias" else: _UpperCamelCase : str = None set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : Any = full_name.split("conv_layers." )[-1] _UpperCamelCase : Optional[Any] = name.split("." ) _UpperCamelCase : Union[str, Any] = int(items[0] ) _UpperCamelCase : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _UpperCamelCase : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _UpperCamelCase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _UpperCamelCase : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _UpperCamelCase : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase_ ) def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : Dict = SEWConfig() if is_finetuned: _UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg else: _UpperCamelCase : List[Any] = model.cfg _UpperCamelCase : Any = fs_config.conv_bias _UpperCamelCase : str = eval(fs_config.conv_feature_layers ) _UpperCamelCase : Any = [x[0] for x in conv_layers] _UpperCamelCase : List[Any] = [x[1] for x in conv_layers] _UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers] _UpperCamelCase : str = "gelu" _UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" _UpperCamelCase : Optional[int] = 0.0 _UpperCamelCase : Dict = fs_config.activation_fn.name _UpperCamelCase : Any = fs_config.encoder_embed_dim _UpperCamelCase : Optional[Any] = 0.02 _UpperCamelCase : str = fs_config.encoder_ffn_embed_dim _UpperCamelCase : int = 1e-5 _UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop _UpperCamelCase : str = fs_config.encoder_attention_heads _UpperCamelCase : Tuple = fs_config.conv_pos_groups _UpperCamelCase : List[str] = fs_config.conv_pos _UpperCamelCase : Optional[int] = len(lowercase_ ) _UpperCamelCase : Union[str, Any] = fs_config.encoder_layers _UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _UpperCamelCase : List[str] = model.cfg _UpperCamelCase : List[str] = fs_config.final_dropout _UpperCamelCase : Optional[Any] = fs_config.layerdrop _UpperCamelCase : int = fs_config.activation_dropout _UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _UpperCamelCase : int = fs_config.attention_dropout _UpperCamelCase : int = fs_config.dropout_input _UpperCamelCase : List[Any] = fs_config.dropout _UpperCamelCase : List[Any] = fs_config.mask_channel_length _UpperCamelCase : List[str] = fs_config.mask_channel_prob _UpperCamelCase : Optional[Any] = fs_config.mask_length _UpperCamelCase : Optional[int] = fs_config.mask_prob _UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor" _UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str: """simple docstring""" if is_finetuned: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ ) else: _UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ ) _UpperCamelCase : List[str] = model[0].eval() _UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == 
"layer" else False _UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,) if is_finetuned: if dict_path: _UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase : List[str] = target_dict.pad_index _UpperCamelCase : Optional[int] = target_dict.bos_index _UpperCamelCase : Any = target_dict.pad_index _UpperCamelCase : List[Any] = target_dict.bos_index _UpperCamelCase : List[str] = target_dict.eos_index _UpperCamelCase : Optional[Any] = len(target_dict.symbols ) _UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" ) if not os.path.isdir(lowercase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) ) return os.makedirs(lowercase_ ,exist_ok=lowercase_ ) with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices ,lowercase_ ) _UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer( lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,) _UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) _UpperCamelCase : List[Any] = SEWForCTC(lowercase_ ) else: _UpperCamelCase : int = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowerCamelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
310
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _lowerCamelCase : '''simple docstring''' A_ : Dict = MBartConfig A_ : Any = {} A_ : str = """gelu""" def __init__( self : Optional[Any] , _A : int , _A : int=13 , _A : Optional[int]=7 , _A : Union[str, Any]=True , _A : Dict=False , _A : Optional[Any]=99 , _A : Dict=32 , _A : Any=2 , _A : Tuple=4 , _A : List[str]=37 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : List[Any]=20 , _A : str=2 , _A : int=1 , _A : int=0 , ) -> List[str]: __magic_name__ : List[Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : Union[str, Any] = is_training __magic_name__ : Optional[int] = use_labels __magic_name__ : int = vocab_size __magic_name__ : Tuple = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Tuple = num_attention_heads __magic_name__ : Optional[int] = intermediate_size __magic_name__ : Any = hidden_dropout_prob __magic_name__ : Dict = attention_probs_dropout_prob __magic_name__ : Optional[Any] = max_position_embeddings __magic_name__ : str = eos_token_id __magic_name__ : str = pad_token_id __magic_name__ : Dict = bos_token_id def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __magic_name__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __magic_name__ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 ) __magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Tuple = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __magic_name__ : Any = prepare_mbart_inputs_dict(_A , _A , _A ) return config, inputs_dict def __lowerCAmelCase ( self : Optional[Any] , _A : List[Any] , _A : Tuple ) -> Dict: __magic_name__ : Optional[int] = TFMBartModel(config=_A ).get_decoder() __magic_name__ : Any = inputs_dict['input_ids'] __magic_name__ : int = input_ids[:1, :] __magic_name__ : Dict = inputs_dict['attention_mask'][:1, :] __magic_name__ : Union[str, Any] = inputs_dict['head_mask'] __magic_name__ : str = 1 # first forward pass __magic_name__ : Tuple = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A ) __magic_name__ , __magic_name__ : List[Any] = outputs.to_tuple() __magic_name__ : Tuple = past_key_values[1] def lowerCamelCase ( 
lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Any=None , ): """simple docstring""" if attention_mask is None: __magic_name__ : int = tf.cast(tf.math.not_equal(lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __magic_name__ : Tuple = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __magic_name__ : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __magic_name__ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __magic_name__ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () A_ : Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else () A_ : List[str] = ( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) A_ : Tuple = True A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : Tuple , _A : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[Any]: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : List[str] = TFMBartModelTester(self ) __magic_name__ : List[str] = ConfigTester(self , config_class=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : str ) -> List[str]: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_A ) @require_sentencepiece @require_tokenizers @require_tf class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = [ """ UN Chief Says There Is No Military Solution in Syria""", ] A_ : List[str] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] A_ : int = """facebook/mbart-large-en-ro""" @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> str: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __lowerCAmelCase ( self : Optional[int] ) -> Tuple: __magic_name__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __lowerCAmelCase ( self : Any , **_A : Union[str, Any] ) -> Dict: __magic_name__ : Optional[Any] = self.translate_src_text(**_A ) self.assertListEqual(self.expected_text , _A ) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> int: __magic_name__ : List[Any] = self.tokenizer(self.src_text , **_A , return_tensors='tf' ) __magic_name__ : int = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __magic_name__ : Any = self.tokenizer.batch_decode(_A , skip_special_tokens=_A ) return generated_words @slow def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: self._assert_generated_batch_equal_expected()
331
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
331
1
"""simple docstring""" snake_case__ : Union[str, Any] = 65_521 def _snake_case ( _snake_case : str ): lowerCAmelCase : Dict = 1 lowerCAmelCase : Optional[Any] = 0 for plain_chr in plain_text: lowerCAmelCase : Dict = (a + ord(_snake_case )) % MOD_ADLER lowerCAmelCase : str = (b + a) % MOD_ADLER return (b << 16) | a
365
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCamelCase_ ): lowerCAmelCase : Dict = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase : Dict = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
314
0
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient __a : Optional[Any] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""]) def UpperCAmelCase ( lowercase ): """simple docstring""" __lowercase = test_results.split(''' ''' ) __lowercase = 0 __lowercase = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. __lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(lowercase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def UpperCAmelCase ( lowercase ): """simple docstring""" __lowercase = {} __lowercase = None __lowercase = False for line in failures_short_lines.split('''\n''' ): if re.search(r'''_ \[doctest\]''' , lowercase ): __lowercase = True __lowercase = line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): __lowercase = line __lowercase = False return failures class _UpperCamelCase : """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' __lowercase = title __lowercase = doc_test_results['''time_spent'''].split(''',''' )[0] __lowercase = doc_test_results['''success'''] __lowercase = doc_test_results['''failures'''] __lowercase = self.n_success + self.n_failures # Failures and success of the modeling tests __lowercase = doc_test_results @property def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase = [self._time_spent] __lowercase = 0 for time in time_spent: __lowercase = time.split(''':''' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowerCAmelCase__ ) == 1: __lowercase = [0, 0, time_parts[0]] __lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 36_00 + minutes * 60 + seconds __lowercase , __lowercase , __lowercase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60 return F"{int(lowerCAmelCase__ )}h{int(lowerCAmelCase__ )}m{int(lowerCAmelCase__ )}s" @property def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": ( F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" F" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase = 40 __lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )} __lowercase = '''''' for category, failures in category_failures.items(): if len(lowerCAmelCase__ ) == 0: continue if report != "": report += "\n\n" report += F"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(lowerCAmelCase__ ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"The following examples had failures:\n\n\n{report}\n", }, } @property def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(lowerCAmelCase__ ) @staticmethod def _SCREAMING_SNAKE_CASE ( ) -> Any: '''simple docstring''' __lowercase = [ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(lowerCAmelCase__ )} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(self.payload )} ) ) __lowercase = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.''' __lowercase = client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' __lowercase = '''''' for key, value in failures.items(): __lowercase = value[:2_00] + ''' [Truncated]''' if len(lowerCAmelCase__ ) > 2_50 else value failures_text += F"*{key}*\n_{value}_\n\n" __lowercase = job_name __lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: __lowercase = { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True}, '''url''': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''' ) __lowercase = self.doc_test_results.pop('''job_link''' ) self.doc_test_results.pop('''failures''' ) self.doc_test_results.pop('''success''' ) 
self.doc_test_results.pop('''time_spent''' ) __lowercase = sorted(self.doc_test_results.items() , key=lambda lowerCAmelCase__ : t[0] ) for job, job_result in sorted_dict: if len(job_result['''failures'''] ): __lowercase = F"*Num failures* :{len(job_result['failed'] )} \n" __lowercase = job_result['''failures'''] __lowercase = self.get_reply_blocks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text=lowerCAmelCase__ ) print('''Sending the following reply''' ) print(json.dumps({'''blocks''': blocks} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F"Results for {job}" , blocks=lowerCAmelCase__ , thread_ts=self.thread_ts['''ts'''] , ) time.sleep(1 ) def UpperCAmelCase ( ): """simple docstring""" __lowercase = os.environ['''GITHUB_RUN_ID'''] __lowercase = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" __lowercase = requests.get(lowercase ).json() __lowercase = {} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) __lowercase = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(lowercase ): __lowercase = requests.get(url + F"&page={i + 2}" ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''' , lowercase ) return {} def UpperCAmelCase ( lowercase ): """simple docstring""" __lowercase = {} if os.path.exists(lowercase ): __lowercase = os.listdir(lowercase ) for file in files: try: with open(os.path.join(lowercase , lowercase ) , encoding='''utf-8''' ) as f: __lowercase = f.read() except UnicodeDecodeError as e: raise ValueError(F"Could not open {os.path.join(lowercase , lowercase )}." ) from e return _artifact def UpperCAmelCase ( ): """simple docstring""" class _UpperCamelCase : """simple docstring""" def __init__( self , lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' __lowercase = name __lowercase = [] def __str__( self ) -> int: '''simple docstring''' return self.name def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> Any: '''simple docstring''' self.paths.append({'''name''': self.name, '''path''': path} ) __lowercase = {} __lowercase = filter(os.path.isdir , os.listdir() ) for directory in directories: __lowercase = directory if artifact_name not in _available_artifacts: __lowercase = Artifact(lowercase ) _available_artifacts[artifact_name].add_path(lowercase ) return _available_artifacts if __name__ == "__main__": __a : Optional[int] = get_job_links() __a : Optional[int] = retrieve_available_artifacts() __a : str = collections.OrderedDict( [ ("""*.py""", """API Examples"""), ("""*.md""", """MD Examples"""), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' __a : int = { v: { """failed""": [], """failures""": {}, } for v in docs.values() } # Link to the GitHub Action job __a : Tuple = github_actions_job_links.get("""run_doctests""") __a : Optional[int] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0] __a : int = retrieve_artifact(artifact_path["""name"""]) if "stats" in artifact: __a , __a , __a : Dict = handle_test_results(artifact["""stats"""]) __a : Optional[int] = failed __a : Optional[Any] = success __a : Tuple = time_spent[1:-1] + """, """ __a : Optional[Any] = extract_first_line_failure(artifact["""failures_short"""]) for line in 
artifact["summary_short"].split("""\n"""): if re.search("""FAILED""", line): __a : Dict = line.replace("""FAILED """, """""") __a : Tuple = line.split()[0].replace("""\n""", """""") if "::" in line: __a , __a : int = line.split("""::""") else: __a , __a : int = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): __a : Tuple = docs[file_regex] doc_test_results[category]["failed"].append(test) __a : Optional[int] = all_failures[test] if test in all_failures else """N/A""" __a : Tuple = failure break __a : Tuple = Message("""🤗 Results of the doc tests.""", doc_test_results) message.post() message.post_reply()
210
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a : str = logging.get_logger(__name__) __a : int = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class _UpperCamelCase ( _UpperCAmelCase ): """simple docstring""" __a : Any = '''wavlm''' def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=1_28 , lowerCAmelCase__=16 , lowerCAmelCase__=3_20 , lowerCAmelCase__=8_00 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=3_20 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_00 , lowerCAmelCase__=2_56 , lowerCAmelCase__=2_56 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=2_56 , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=5_12 , lowerCAmelCase__=80 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple: '''simple docstring''' super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) __lowercase = hidden_size __lowercase = feat_extract_norm __lowercase = feat_extract_activation __lowercase = list(lowerCAmelCase__ ) __lowercase = list(lowerCAmelCase__ ) __lowercase = list(lowerCAmelCase__ ) __lowercase = conv_bias __lowercase = num_buckets __lowercase = max_bucket_distance __lowercase = num_conv_pos_embeddings __lowercase = num_conv_pos_embedding_groups __lowercase = len(self.conv_dim ) __lowercase = num_hidden_layers __lowercase = intermediate_size __lowercase = hidden_act __lowercase = num_attention_heads __lowercase = hidden_dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = feat_proj_dropout __lowercase = final_dropout __lowercase = layerdrop __lowercase = layer_norm_eps __lowercase = initializer_range __lowercase = num_ctc_classes __lowercase = vocab_size __lowercase = do_stable_layer_norm __lowercase = use_weighted_layer_sum __lowercase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowercase = apply_spec_augment __lowercase = mask_time_prob __lowercase = mask_time_length __lowercase = mask_time_min_masks __lowercase = mask_feature_prob __lowercase = mask_feature_length # parameters for pretraining with codevector quantized representations __lowercase = num_codevectors_per_group __lowercase = num_codevector_groups __lowercase = contrastive_logits_temperature __lowercase = num_negatives __lowercase = codevector_dim __lowercase = proj_codevector_dim __lowercase = diversity_loss_weight # ctc loss __lowercase = ctc_loss_reduction __lowercase = ctc_zero_infinity # adapter __lowercase = add_adapter __lowercase = adapter_kernel_size __lowercase = adapter_stride __lowercase = num_adapter_layers __lowercase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. __lowercase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __lowercase = list(lowerCAmelCase__ ) __lowercase = list(lowerCAmelCase__ ) __lowercase = list(lowerCAmelCase__ ) __lowercase = xvector_output_dim @property def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
210
1
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
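Two illustrative checks, added here only to make the behavior concrete (not part of the original file): "abr" starts at indices 0 and 7 of "abracadabra", so the pattern is found twice.

assert z_function("abab") == [0, 0, 2, 0]
assert find_pattern("abr", "abracadabra") == 2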
106
'''simple docstring''' import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __A : @property def _lowercase (self : int ): return self.get_dummy_input() @property def _lowercase (self : Any ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def _lowercase (self : Tuple , __a : List[str]=True , __a : Any=False , __a : List[Any]=False , __a : Any=False , ): UpperCAmelCase_ = 4 UpperCAmelCase_ = 32 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = torch.device(__a ) UpperCAmelCase_ = (batch_size, num_channels) + sizes UpperCAmelCase_ = randn_tensor(__a , generator=__a , device=__a ) UpperCAmelCase_ = {"hidden_states": hidden_states} if include_temb: UpperCAmelCase_ = 128 UpperCAmelCase_ = randn_tensor((batch_size, temb_channels) , generator=__a , device=__a ) if include_res_hidden_states_tuple: UpperCAmelCase_ = torch.manual_seed(1 ) UpperCAmelCase_ = (randn_tensor(__a , generator=__a , device=__a ),) if include_encoder_hidden_states: UpperCAmelCase_ = floats_tensor((batch_size, 32, 32) ).to(__a ) if include_skip_sample: UpperCAmelCase_ = randn_tensor(((batch_size, 3) + sizes) , generator=__a , device=__a ) return dummy_input def _lowercase (self : Tuple ): UpperCAmelCase_ = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": UpperCAmelCase_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) UpperCAmelCase_ = self.dummy_input return init_dict, inputs_dict def _lowercase (self : Tuple , __a : Any ): UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ = self.block_class(**__a ) unet_block.to(__a ) unet_block.eval() with torch.no_grad(): UpperCAmelCase_ = unet_block(**__a ) if isinstance(__a , __a ): UpperCAmelCase_ = output[0] self.assertEqual(output.shape , self.output_shape ) UpperCAmelCase_ = output[0, -1, -3:, -3:] UpperCAmelCase_ = torch.tensor(__a ).to(__a ) assert torch_all_close(output_slice.flatten() , __a , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def _lowercase (self : Dict ): UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ = self.block_class(**__a ) model.to(__a ) model.train() UpperCAmelCase_ = model(**__a ) if isinstance(__a , __a ): UpperCAmelCase_ = output[0] UpperCAmelCase_ = torch.device(__a ) UpperCAmelCase_ = randn_tensor(output.shape , device=__a ) UpperCAmelCase_ = torch.nn.functional.mse_loss(__a , __a ) loss.backward()
106
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    model_type = "funnel"

    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
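An illustrative check of the derived properties (not part of the original file): the default configuration has three blocks of four layers each, so the encoder reports twelve hidden layers.

config = FunnelConfig()
assert config.num_hidden_layers == sum(config.block_sizes) == 12
assert config.num_blocks == len(config.block_sizes) == 3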
11
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def _a ( SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(SCREAMING_SNAKE_CASE , '''_dynamo''' ): return False return isinstance(SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule ) def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" UpperCamelCase__ : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) UpperCamelCase__ : Optional[int] = is_compiled_module(SCREAMING_SNAKE_CASE ) if is_compiled: UpperCamelCase__ : Optional[int] = model UpperCamelCase__ : Optional[Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase__ : Optional[int] = model.module if not keep_fpaa_wrapper: UpperCamelCase__ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE , '''forward''' ) UpperCamelCase__ : Optional[Any] = model.__dict__.pop('''_original_forward''' , SCREAMING_SNAKE_CASE ) if original_forward is not None: while hasattr(SCREAMING_SNAKE_CASE , '''__wrapped__''' ): UpperCamelCase__ : Optional[int] = forward.__wrapped__ if forward == original_forward: break UpperCamelCase__ : Optional[Any] = forward if getattr(SCREAMING_SNAKE_CASE , '''_converted_to_transformer_engine''' , SCREAMING_SNAKE_CASE ): convert_model(SCREAMING_SNAKE_CASE , to_transformer_engine=SCREAMING_SNAKE_CASE ) if is_compiled: UpperCamelCase__ : Tuple = model UpperCamelCase__ : Tuple = compiled_model return model def _a ( ): """simple docstring""" PartialState().wait_for_everyone() def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif PartialState().local_process_index == 0: torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @contextmanager def _a ( **SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for key, value in kwargs.items(): UpperCamelCase__ : Dict = str(SCREAMING_SNAKE_CASE ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def _a ( SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" if not hasattr(SCREAMING_SNAKE_CASE , '''__qualname__''' ) and not hasattr(SCREAMING_SNAKE_CASE , '''__name__''' ): UpperCamelCase__ : str = getattr(SCREAMING_SNAKE_CASE , '''__class__''' , SCREAMING_SNAKE_CASE ) if hasattr(SCREAMING_SNAKE_CASE , '''__qualname__''' ): return obj.__qualname__ if hasattr(SCREAMING_SNAKE_CASE , '''__name__''' ): return obj.__name__ return str(SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ): """simple docstring""" for key, value in source.items(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase__ : Optional[Any] = destination.setdefault(SCREAMING_SNAKE_CASE , {} ) merge_dicts(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: UpperCamelCase__ : List[Any] = value return 
    destination


def is_port_in_use(port: int = None) -> bool:
    """Return True if `port` (default: torch.distributed's 29500) is already bound."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
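An illustrative use of the port check above (not part of the original file); `--main_process_port` is the `accelerate launch` flag this helper guards against.

if is_port_in_use(29500):
    print("Port 29500 is taken; pass a different --main_process_port to accelerate launch.")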
146
0
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]: """simple docstring""" _UpperCamelCase = '''\n'''.join(__UpperCAmelCase ) Path(__UpperCAmelCase ).open('''w''' ).writelines(__UpperCAmelCase ) _a = 'patrickvonplaten/t5-tiny-random' _a = 'sshleifer/bart-tiny-random' _a = 'sshleifer/tiny-mbart' _a = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _UpperCAmelCase( _lowerCamelCase ): def UpperCAmelCase ( self , __a) -> List[Any]: '''simple docstring''' _UpperCamelCase = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source''' _UpperCamelCase = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _UpperCamelCase = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'''] _dump_articles(lowercase_ , lowercase_) _UpperCamelCase = str(Path(self.get_auto_remove_tmp_dir()) / '''scores.json''') _UpperCamelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _UpperCamelCase = F'''\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '''.split() with patch.object(lowercase_ , '''argv''' , lowercase_): run_generate() assert Path(lowercase_).exists() # os.remove(Path(output_file_name)) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' self.run_eval_tester(lowercase_) @parameterized.expand([BART_TINY, MBART_TINY]) @slow def UpperCAmelCase ( self , __a) -> int: '''simple docstring''' self.run_eval_tester(lowercase_) @parameterized.expand([T5_TINY, MBART_TINY]) @slow def UpperCAmelCase ( self , __a) -> int: '''simple docstring''' _UpperCamelCase = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source''' _UpperCamelCase = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _UpperCamelCase = { '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''], '''de''': [ '''Maschinelles Lernen ist großartig, oder?''', '''Ich esse gerne Bananen''', '''Morgen ist wieder ein toller Tag!''', ], } _UpperCamelCase = Path(self.get_auto_remove_tmp_dir()) _UpperCamelCase = str(tmp_dir / '''scores.json''') _UpperCamelCase = str(tmp_dir / '''val.target''') _dump_articles(lowercase_ , text['''en''']) _dump_articles(lowercase_ , text['''de''']) _UpperCamelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _UpperCamelCase = F'''\n run_eval_search.py\n {model}\n {str(lowercase_)}\n {str(lowercase_)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '''.split() testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0''']) with patch.object(lowercase_ , '''argv''' , lowercase_): with CaptureStdout() as cs: run_search() _UpperCamelCase = [''' num_beams | length_penalty''', model, '''Best score args'''] _UpperCamelCase = ['''Info'''] if "translation" in task: expected_strings.append('''bleu''') else: 
expected_strings.extend(lowercase_) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowercase_).exists() os.remove(Path(lowercase_))
356
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class _UpperCAmelCase( lowerCamelCase ): lowercase__ = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) lowercase__ = 'CIDAS/clipseg-rd64-refined' lowercase__ = 'image_segmenter' lowercase__ = CLIPSegForImageSegmentation lowercase__ = ['image', 'text'] lowercase__ = ['image'] def __init__( self , *__a , **__a) -> Any: '''simple docstring''' requires_backends(self , ['''vision''']) super().__init__(*__a , **__a) def UpperCAmelCase ( self , __a , __a) -> Dict: '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=__a , return_tensors='''pt''') def UpperCAmelCase ( self , __a) -> Tuple: '''simple docstring''' with torch.no_grad(): _UpperCamelCase = self.model(**__a).logits return logits def UpperCAmelCase ( self , __a) -> Any: '''simple docstring''' _UpperCamelCase = outputs.cpu().detach().numpy() _UpperCamelCase = 0 _UpperCamelCase = 1 return Image.fromarray((array * 2_55).astype(np.uinta))
100
0
"""simple docstring""" def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): _validate_point(UpperCamelCase_ ) _validate_point(UpperCamelCase_ ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(a - b ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ) ) ) def _lowerCAmelCase ( UpperCamelCase_ ): if point: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): for item in point: if not isinstance(UpperCamelCase_ , (int, float) ): __SCREAMING_SNAKE_CASE = ( """Expected a list of numbers as input, found """ f"{type(UpperCamelCase_ ).__name__}" ) raise TypeError(UpperCamelCase_ ) else: __SCREAMING_SNAKE_CASE = f"Expected a list of numbers as input, found {type(UpperCamelCase_ ).__name__}" raise TypeError(UpperCamelCase_ ) else: raise ValueError("""Missing an input""" ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): _validate_point(UpperCamelCase_ ) _validate_point(UpperCamelCase_ ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(x - y ) for x, y in zip(UpperCamelCase_ , UpperCamelCase_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
100
def solution(limit: int = 50_000_000) -> int:
    """Count the numbers below `limit` expressible as the sum of a prime
    square, a prime cube and a prime fourth power (Project Euler 87)."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Iterate in ascending order so that the early `break`s below are valid.
    sorted_primes = sorted(primes)
    for prime1 in sorted_primes:
        square = prime1 * prime1
        for prime2 in sorted_primes:
            cube = prime2 * prime2 * prime2
            # 16 is the smallest possible fourth power (2**4).
            if square + cube >= limit - 16:
                break
            for prime3 in sorted_primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
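An illustrative check (not part of the original file). Below fifty there are exactly four such numbers: 28, 33, 47 and 49 (e.g. 28 = 2**2 + 2**3 + 2**4).

assert solution(50) == 4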
216
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
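An illustrative check (not part of the original file): hidden_size is derived from embed_dim and the number of stages, i.e. 96 * 2**3 = 768 for the defaults.

config = DonutSwinConfig()
assert config.hidden_size == 768
assert config.num_layers == len(config.depths) == 4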
363
'''simple docstring''' import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger a : Any = get_logger(__name__) a : Union[str, Any] = Path(__file__).parent / 'model_card_template.md' a : List[Any] = uuida().hex a : List[str] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES a : str = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES a : Optional[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __magic_name__ ( __UpperCAmelCase = None ) -> str: '''simple docstring''' snake_case_ = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F"; torch/{_torch_version}" if is_flax_available(): ua += F"; jax/{_jax_version}" ua += F"; flax/{_flax_version}" if is_onnx_available(): ua += F"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get('''DIFFUSERS_IS_CI''', '''''' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCAmelCase, __UpperCAmelCase ): ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() ) elif isinstance(__UpperCAmelCase, __UpperCAmelCase ): ua += "; " + user_agent return ua def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None, __UpperCAmelCase = None ) -> Optional[Any]: '''simple docstring''' if token is None: snake_case_ = HfFolder.get_token() if organization is None: snake_case_ = whoami(__UpperCAmelCase )['''name'''] return F"{username}/{model_id}" else: return F"{organization}/{model_id}" def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( '''Modelcard rendering is based on Jinja templates.''' ''' Please make sure to have `jinja` installed before using `create_model_card`.''' ''' To install it, please run `pip install Jinja2`.''' ) if hasattr(__UpperCAmelCase, '''local_rank''' ) and args.local_rank not in [-1, 0]: return snake_case_ = args.hub_token if hasattr(__UpperCAmelCase, '''hub_token''' ) else None snake_case_ = get_full_repo_name(__UpperCAmelCase, token=__UpperCAmelCase ) snake_case_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[], ), template_path=__UpperCAmelCase, model_name=__UpperCAmelCase, repo_name=__UpperCAmelCase, dataset_name=args.dataset_name if hasattr(__UpperCAmelCase, '''dataset_name''' ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, 
gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCAmelCase, '''gradient_accumulation_steps''' ) else None ), adam_betaa=args.adam_betaa if hasattr(__UpperCAmelCase, '''adam_beta1''' ) else None, adam_betaa=args.adam_betaa if hasattr(__UpperCAmelCase, '''adam_beta2''' ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCAmelCase, '''adam_weight_decay''' ) else None, adam_epsilon=args.adam_epsilon if hasattr(__UpperCAmelCase, '''adam_epsilon''' ) else None, lr_scheduler=args.lr_scheduler if hasattr(__UpperCAmelCase, '''lr_scheduler''' ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCAmelCase, '''lr_warmup_steps''' ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCAmelCase, '''ema_inv_gamma''' ) else None, ema_power=args.ema_power if hasattr(__UpperCAmelCase, '''ema_power''' ) else None, ema_max_decay=args.ema_max_decay if hasattr(__UpperCAmelCase, '''ema_max_decay''' ) else None, mixed_precision=args.mixed_precision, ) snake_case_ = os.path.join(args.output_dir, '''README.md''' ) model_card.save(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None ) -> Optional[Any]: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash snake_case_ = str(Path(__UpperCAmelCase ).as_posix() ) snake_case_ = re.search(r'''snapshots/([^/]+)/''', __UpperCAmelCase ) if search is None: return None snake_case_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCAmelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. a : str = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) a : Optional[Any] = os.path.join(hf_cache_home, 'diffusers') def __magic_name__ ( __UpperCAmelCase = None, __UpperCAmelCase = None ) -> None: '''simple docstring''' if new_cache_dir is None: snake_case_ = DIFFUSERS_CACHE if old_cache_dir is None: snake_case_ = old_diffusers_cache snake_case_ = Path(__UpperCAmelCase ).expanduser() snake_case_ = Path(__UpperCAmelCase ).expanduser() for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): snake_case_ = new_cache_dir / old_blob_path.relative_to(__UpperCAmelCase ) new_blob_path.parent.mkdir(parents=__UpperCAmelCase, exist_ok=__UpperCAmelCase ) os.replace(__UpperCAmelCase, __UpperCAmelCase ) try: os.symlink(__UpperCAmelCase, __UpperCAmelCase ) except OSError: logger.warning( '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). a : Tuple = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): a : Tuple = 0 else: with open(cache_version_file) as f: try: a : Optional[Any] = int(f.read()) except ValueError: a : List[str] = 0 if cache_version < 1: a : Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. 
This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: a : str = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' 'the directory exists and can be written to.' ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None ) -> str: '''simple docstring''' if variant is not None: snake_case_ = weights_name.split('''.''' ) snake_case_ = splits[:-1] + [variant] + splits[-1:] snake_case_ = '''.'''.join(__UpperCAmelCase ) return weights_name def __magic_name__ ( __UpperCAmelCase, *, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=None, ) -> int: '''simple docstring''' snake_case_ = str(__UpperCAmelCase ) if os.path.isfile(__UpperCAmelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCAmelCase ): if os.path.isfile(os.path.join(__UpperCAmelCase, __UpperCAmelCase ) ): # Load from a PyTorch checkpoint snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) ): snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) return model_file else: raise EnvironmentError( F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCAmelCase ).base_version ) >= version.parse('''0.20.0''' ) ): try: snake_case_ = hf_hub_download( __UpperCAmelCase, filename=_add_variant(__UpperCAmelCase, __UpperCAmelCase ), cache_dir=__UpperCAmelCase, force_download=__UpperCAmelCase, proxies=__UpperCAmelCase, resume_download=__UpperCAmelCase, local_files_only=__UpperCAmelCase, use_auth_token=__UpperCAmelCase, user_agent=__UpperCAmelCase, subfolder=__UpperCAmelCase, revision=revision or commit_hash, ) warnings.warn( F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", __UpperCAmelCase, ) return model_file except: # noqa: E722 warnings.warn( F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCAmelCase, __UpperCAmelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. 
\n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__UpperCAmelCase, __UpperCAmelCase )}' so that the correct variant file can be added.", __UpperCAmelCase, ) try: # 2. Load model file as usual snake_case_ = hf_hub_download( __UpperCAmelCase, filename=__UpperCAmelCase, cache_dir=__UpperCAmelCase, force_download=__UpperCAmelCase, proxies=__UpperCAmelCase, resume_download=__UpperCAmelCase, local_files_only=__UpperCAmelCase, use_auth_token=__UpperCAmelCase, user_agent=__UpperCAmelCase, subfolder=__UpperCAmelCase, revision=revision or commit_hash, ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ''' '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ''' '''login`.''' ) except RevisionNotFoundError: raise EnvironmentError( F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " '''this model name. Check the model page at ''' F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) except HTTPError as err: raise EnvironmentError( F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" F" directory containing a file named {weights_name} or" ''' \nCheckout your internet connection or see how to run the library in''' ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' ) except EnvironmentError: raise EnvironmentError( F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ''' F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " F"containing a file named {weights_name}" )
72
0
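# A self-contained sketch of the variant-filename rule used by `_add_variant`
# in the hub-utils record above: the variant tag is inserted just before the
# file extension. The name `add_variant` here is illustrative, not library API.
from typing import Optional


def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"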
g = 9.80_665  # standard gravity, m/s^2


def archimedes_principle(
    fluid_density: float, volume: float, gravity: float = g
) -> float:
    """Buoyant force on an object submerged in a fluid: F = rho * g * V."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
227
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
227
1
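# A worked example for the Archimedes-principle helper above. For 0.002 m^3 of
# displaced water (rho = 1000 kg/m^3) the buoyant force is rho * g * V:
rho_water = 1000.0  # kg/m^3
volume = 0.002  # m^3
g_std = 9.80665  # m/s^2
force = rho_water * g_std * volume
assert abs(force - 19.6133) < 1e-6  # newtons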
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--",
    " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
46
'''simple docstring''' import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort _a : List[Any] = logging.get_logger(__name__) _a : Union[str, Any] = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class _UpperCAmelCase : def __init__( self,__SCREAMING_SNAKE_CASE=None,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" ) __lowerCAmelCase = model __lowerCAmelCase = kwargs.get("""model_save_dir""",__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = kwargs.get("""latest_model_name""",__SCREAMING_SNAKE_CASE ) def __call__( self,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = {k: np.array(__SCREAMING_SNAKE_CASE ) for k, v in kwargs.items()} return self.model.run(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) @staticmethod def lowerCamelCase__ ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if provider is None: logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" ) __lowerCAmelCase = """CPUExecutionProvider""" return ort.InferenceSession(__SCREAMING_SNAKE_CASE,providers=[provider],sess_options=__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME __lowerCAmelCase = self.model_save_dir.joinpath(self.latest_model_name ) __lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).joinpath(__SCREAMING_SNAKE_CASE ) try: shutil.copyfile(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) except shutil.SameFileError: pass # copy external weights (for models >2GB) __lowerCAmelCase = self.model_save_dir.joinpath(__SCREAMING_SNAKE_CASE ) if src_path.exists(): __lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).joinpath(__SCREAMING_SNAKE_CASE ) try: shutil.copyfile(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) except shutil.SameFileError: pass def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' if os.path.isfile(__SCREAMING_SNAKE_CASE ): logger.error(f'Provided path ({save_directory}) should be a directory, not a file' ) return os.makedirs(__SCREAMING_SNAKE_CASE,exist_ok=__SCREAMING_SNAKE_CASE ) # saving model weights/files self._save_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) @classmethod def lowerCamelCase__ ( cls,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = OnnxRuntimeModel.load_model( 
os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),provider=__SCREAMING_SNAKE_CASE,sess_options=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ) # load model from hub else: # download model __lowerCAmelCase = hf_hub_download( repo_id=__SCREAMING_SNAKE_CASE,filename=__SCREAMING_SNAKE_CASE,use_auth_token=__SCREAMING_SNAKE_CASE,revision=__SCREAMING_SNAKE_CASE,cache_dir=__SCREAMING_SNAKE_CASE,force_download=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).parent __lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).name __lowerCAmelCase = OnnxRuntimeModel.load_model(__SCREAMING_SNAKE_CASE,provider=__SCREAMING_SNAKE_CASE,sess_options=__SCREAMING_SNAKE_CASE ) return cls(model=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) @classmethod def lowerCamelCase__ ( cls,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = None if len(str(__SCREAMING_SNAKE_CASE ).split("""@""" ) ) == 2: __lowerCAmelCase , __lowerCAmelCase = model_id.split("""@""" ) return cls._from_pretrained( model_id=__SCREAMING_SNAKE_CASE,revision=__SCREAMING_SNAKE_CASE,cache_dir=__SCREAMING_SNAKE_CASE,force_download=__SCREAMING_SNAKE_CASE,use_auth_token=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
46
1
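# A round-trip check mirroring the Morse-code record above, using a minimal
# inline table so the sketch runs on its own.
MORSE = {"S": "...", "O": "---"}
REVERSE = {v: k for k, v in MORSE.items()}

encoded = " ".join(MORSE[c] for c in "SOS")
assert encoded == "... --- ..."
assert "".join(REVERSE[c] for c in encoded.split()) == "SOS"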
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
345
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
345
1
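# A NumPy re-statement of the sinusoidal timestep embedding from the JAX record
# above (default settings: freq_shift=1, scale=1, no sin/cos flip). This is a
# sketch for intuition, not the library implementation.
import numpy as np


def sinusoidal_embedding(timesteps: np.ndarray, dim: int, max_timescale: float = 1e4) -> np.ndarray:
    half = dim // 2
    # frequencies decay geometrically from 1 down to 1/max_timescale
    freqs = np.exp(-np.log(max_timescale) * np.arange(half) / (half - 1))
    args = timesteps[:, None] * freqs[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)


emb = sinusoidal_embedding(np.array([0.0, 10.0, 100.0]), dim=8)
assert emb.shape == (3, 8)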
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) snake_case : List[str] = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : Optional[Any] = ['''DeiTFeatureExtractor'''] snake_case : Tuple = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : Optional[Any] = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : str = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys snake_case : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
357
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class snake_case_ (unittest.TestCase ): def lowerCamelCase__( self :List[Any] ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase__( self :int ) -> Optional[Any]: a__ , a__ = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' ,from_pt=__snake_case ,dtype=jnp.bfloataa ) a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa ) a__ = controlnet_params a__ = 'bird' a__ = jax.device_count() a__ = pipe.prepare_text_inputs([prompts] * num_samples ) a__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) a__ = pipe.prepare_image_inputs([canny_image] * num_samples ) a__ = jax.random.PRNGKey(0 ) a__ = jax.random.split(__snake_case ,jax.device_count() ) a__ = replicate(__snake_case ) a__ = shard(__snake_case ) a__ = shard(__snake_case ) a__ = pipe( prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a__ = images[0, 2_53:2_56, 2_53:2_56, -1] a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) a__ = jnp.array( [0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]: a__ , a__ = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' ,from_pt=__snake_case ,dtype=jnp.bfloataa ) a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa ) a__ = controlnet_params a__ = 'Chef in the kitchen' a__ = jax.device_count() a__ = pipe.prepare_text_inputs([prompts] * num_samples ) a__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) a__ = pipe.prepare_image_inputs([pose_image] * num_samples ) a__ = jax.random.PRNGKey(0 ) a__ = jax.random.split(__snake_case ,jax.device_count() ) a__ = replicate(__snake_case ) a__ = shard(__snake_case ) a__ = shard(__snake_case ) a__ = pipe( prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a__ = images[0, 2_53:2_56, 2_53:2_56, -1] a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) a__ = jnp.array( [[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
109
0
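# The DeiT `__init__.py` above defers heavy imports through `_LazyModule`. A
# minimal stand-alone version of the same idea uses module-level `__getattr__`
# (PEP 562); the attribute-to-module mapping below is illustrative only.
import importlib

_LAZY_ATTRS = {
    "DeiTConfig": ".configuration_deit",
    "DeiTModel": ".modeling_deit",
}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")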
def rank_of_matrix(matrix):
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row and re-check the new diagonal element

    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
236
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
236
1
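# A worked check for the digit-fifth-powers record above. 4150 and 4151 are two
# of the sought numbers: 4^5 + 1^5 + 5^5 + 0^5 = 1024 + 1 + 3125 + 0 = 4150.
assert sum(int(d) ** 5 for d in "4150") == 4150
assert sum(int(d) ** 5 for d in "4151") == 4151  # 1024 + 1 + 3125 + 1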
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : List[str] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} _snake_case : Dict = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } _snake_case : Any = { 'gpt-neox-20b': 2048, } class A ( A__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ['input_ids', 'attention_mask'] def __init__( self : Dict , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]="<|endoftext|>" , lowerCAmelCase_ : Tuple="<|endoftext|>" , lowerCAmelCase_ : Tuple="<|endoftext|>" , lowerCAmelCase_ : Union[str, Any]=False , **lowerCAmelCase_ : List[str] , ) -> Optional[Any]: """simple docstring""" super().__init__( __snake_case , __snake_case , tokenizer_file=__snake_case , unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , ) _a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __snake_case ) != add_prefix_space: _a = getattr(__snake_case , pre_tok_state.pop('''type''' ) ) _a = add_prefix_space _a = pre_tok_class(**__snake_case ) _a = add_prefix_space def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" _a = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : "Conversation" ) -> List[int]: """simple docstring""" _a = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__snake_case , add_special_tokens=__snake_case ) + [self.eos_token_id] ) if len(__snake_case ) > self.model_max_length: _a = input_ids[-self.model_max_length :] return input_ids
353
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
179
0
'''simple docstring'''


def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
34
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: __snake_case = json.load(f) @require_torch class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: int,A_: int ): '''simple docstring''' return FSMTTokenizer.from_pretrained(A_ ) def snake_case_ ( self: Dict,A_: int ): '''simple docstring''' __UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 2_6.0], ['ru-en', 2_2.0], ['en-de', 2_2.0], ['de-en', 2_9.0], ] ) @slow def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = F'''facebook/wmt19-{pair}''' __UpperCamelCase = self.get_tokenizer(A_ ) __UpperCamelCase = self.get_model(A_ ) __UpperCamelCase = bleu_data[pair]['src'] __UpperCamelCase = bleu_data[pair]['tgt'] __UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ ) __UpperCamelCase = model.generate( input_ids=batch.input_ids,num_beams=8,) __UpperCamelCase = tokenizer.batch_decode( A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ ) __UpperCamelCase = calculate_bleu(A_,A_ ) print(A_ ) self.assertGreaterEqual(scores['bleu'],A_ )
310
0
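# Why `number & (number - 1) == 0` in the record above detects powers of two
# (for positive n): a power of two has exactly one set bit, and n - 1 flips
# that bit and sets every lower bit, so the bitwise AND clears everything.
for n, expected in [(1, True), (2, True), (3, False), (8, True), (12, False)]:
    assert ((n & (n - 1)) == 0) is expected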
'''simple docstring'''
import torch

from diffusers import StableDiffusionPipeline


model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
220
'''simple docstring''' import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration A__ : str =5_00_00 A__ : Optional[int] =50_00 A__ , A__ : Optional[int] =os.path.split(__file__) A__ : Tuple =os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" for i in range(lowerCAmelCase ): _lowerCAmelCase = dataset[i] @get_duration def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" for i in range(0 , len(lowerCAmelCase ) , lowerCAmelCase ): _lowerCAmelCase = dataset[i : i + batch_size] @get_duration def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with dataset.formatted_as(type=lowerCAmelCase ): for i in range(lowerCAmelCase ): _lowerCAmelCase = dataset[i] @get_duration def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with dataset.formatted_as(type=lowerCAmelCase ): for i in range(0 , lowerCAmelCase , lowerCAmelCase ): _lowerCAmelCase = dataset[i : i + batch_size] def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = {"""num examples""": SPEED_TEST_N_EXAMPLES} _lowerCAmelCase = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}), ] _lowerCAmelCase = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("""generating dataset""" ) _lowerCAmelCase = datasets.Features( {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} ) _lowerCAmelCase = generate_example_dataset( os.path.join(lowerCAmelCase , """dataset.arrow""" ) , lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes={"""list""": (1_00,)} , ) print("""first set of iterations""" ) for func, kwargs in functions: print(func.__name__ , str(lowerCAmelCase ) ) _lowerCAmelCase = func(lowerCAmelCase , **lowerCAmelCase ) print("""shuffling dataset""" ) _lowerCAmelCase = dataset.shuffle() print("""Second set of iterations (after shuffling""" ) for func, kwargs in functions_shuffled: print("""shuffled """ , func.__name__ , 
str(lowerCAmelCase ) ) _lowerCAmelCase = func( lowerCAmelCase , **lowerCAmelCase ) with open(lowerCAmelCase , """wb""" ) as f: f.write(json.dumps(lowerCAmelCase ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
220
1
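# The benchmark record above imports a `get_duration` decorator from a local
# `utils` module; a minimal equivalent (an assumption about its behavior) can
# be written with `time.perf_counter`.
import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # seconds elapsed
    return wrapper


@get_duration
def busy_loop(n):
    sum(range(n))


elapsed = busy_loop(1_000_000)
assert elapsed >= 0.0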
'''simple docstring'''
import re


def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
265
def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
314
0
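# Standalone checks for the Indian-phone-number pattern used in the record
# above; `fullmatch` makes the implicit whole-string comparison explicit.
import re

PAT = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
assert PAT.fullmatch("+918827897895")
assert PAT.fullmatch("9876543210")
assert PAT.fullmatch("12345") is None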
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
138
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case__ : def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=30 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , lowerCAmelCase__=2 , ) -> Optional[Any]: __magic_name__ : Optional[Any] = parent __magic_name__ : List[str] = batch_size __magic_name__ : Union[str, Any] = image_size __magic_name__ : Optional[Any] = patch_size __magic_name__ : Union[str, Any] = num_channels __magic_name__ : Union[str, Any] = is_training __magic_name__ : Union[str, Any] = use_labels __magic_name__ : Tuple = hidden_size __magic_name__ : List[str] = num_hidden_layers __magic_name__ : Optional[Any] = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Union[str, Any] = hidden_act __magic_name__ : str = hidden_dropout_prob __magic_name__ : List[str] = attention_probs_dropout_prob __magic_name__ : Tuple = type_sequence_label_size __magic_name__ : Any = initializer_range __magic_name__ : str = scope __magic_name__ : str = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __magic_name__ : Dict = (image_size // patch_size) ** 2 __magic_name__ : int = num_patches + 2 def __magic_name__ ( self ) -> Union[str, Any]: __magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ : Tuple = None if self.use_labels: __magic_name__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self ) -> List[str]: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: __magic_name__ : int = TFDeiTModel(config=lowerCAmelCase__ ) __magic_name__ : List[Any] = model(lowerCAmelCase__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: __magic_name__ : int = TFDeiTForMaskedImageModeling(config=lowerCAmelCase__ ) __magic_name__ : List[str] = model(lowerCAmelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __magic_name__ : Tuple = 1 __magic_name__ : List[Any] = TFDeiTForMaskedImageModeling(lowerCAmelCase__ ) __magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __magic_name__ : Any = model(lowerCAmelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: __magic_name__ : Union[str, Any] = self.type_sequence_label_size __magic_name__ : str = TFDeiTForImageClassification(lowerCAmelCase__ ) __magic_name__ : List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __magic_name__ : int = 1 __magic_name__ : List[str] = TFDeiTForImageClassification(lowerCAmelCase__ ) __magic_name__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __magic_name__ : Dict = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self ) -> List[str]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() __magic_name__ ,__magic_name__ ,__magic_name__ : Union[str, Any] = config_and_inputs __magic_name__ : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): lowercase__ : int = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowercase__ : Any = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowercase__ : int = False lowercase__ : List[Any] = False lowercase__ : Tuple = False lowercase__ : int = False def __magic_name__ ( self ) -> str: __magic_name__ : str = TFDeiTModelTester(self ) __magic_name__ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 ) def __magic_name__ ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def __magic_name__ ( self ) -> Union[str, Any]: pass def __magic_name__ ( self ) -> Union[str, Any]: __magic_name__ ,__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : List[Any] = model_class(lowerCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __magic_name__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Dense ) ) def __magic_name__ ( self ) -> Optional[int]: __magic_name__ ,__magic_name__ : str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Any = model_class(lowerCAmelCase__ ) __magic_name__ : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ : int = [*signature.parameters.keys()] __magic_name__ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __magic_name__ ( self ) -> List[str]: __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __magic_name__ ( self ) -> Tuple: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ ) def __magic_name__ ( self ) -> Any: __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str: __magic_name__ : Any = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __magic_name__ ( self ) -> Dict: for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = TFDeiTModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def UpperCamelCase ( ): """simple docstring""" __magic_name__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class snake_case__ ( unittest.TestCase ): @cached_property def __magic_name__ ( self ) -> Optional[int]: return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def __magic_name__ ( self ) -> List[Any]: __magic_name__ : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) __magic_name__ : Any = self.default_image_processor __magic_name__ : Tuple = prepare_img() __magic_name__ : Tuple = image_processor(images=lowerCAmelCase__ , return_tensors="""tf""" ) # forward pass __magic_name__ : Dict = model(**lowerCAmelCase__ ) # verify the logits __magic_name__ : Optional[Any] = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __magic_name__ : List[Any] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
138
1
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = 42 lowercase__ = None @staticmethod def __lowerCAmelCase ( ): raise NotImplementedError def __lowerCAmelCase ( self : List[Any] ,lowercase_ : List[Any] ,lowercase_ : int ,lowercase_ : str ,**lowercase_ : List[str] ): raise NotImplementedError def __lowerCAmelCase ( self : int ,lowercase_ : int ): raise NotImplementedError def __lowerCAmelCase ( self : List[str] ): if not self.is_available(): raise RuntimeError( F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def __lowerCAmelCase ( cls : str ): return F'`pip install {cls.pip_package or cls.name}`' class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "optuna" @staticmethod def __lowerCAmelCase ( ): return is_optuna_available() def __lowerCAmelCase ( self : Any ,lowercase_ : List[str] ,lowercase_ : int ,lowercase_ : str ,**lowercase_ : Any ): return run_hp_search_optuna(lowercase_ ,lowercase_ ,lowercase_ ,**lowercase_ ) def __lowerCAmelCase ( self : int ,lowercase_ : List[Any] ): return default_hp_space_optuna(lowercase_ ) class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "ray" lowercase__ = "'ray[tune]'" @staticmethod def __lowerCAmelCase ( ): return is_ray_available() def __lowerCAmelCase ( self : str ,lowercase_ : int ,lowercase_ : int ,lowercase_ : str ,**lowercase_ : str ): return run_hp_search_ray(lowercase_ ,lowercase_ ,lowercase_ ,**lowercase_ ) def __lowerCAmelCase ( self : Dict ,lowercase_ : Tuple ): return default_hp_space_ray(lowercase_ ) class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "sigopt" @staticmethod def __lowerCAmelCase ( ): return is_sigopt_available() def __lowerCAmelCase ( self : Tuple ,lowercase_ : List[str] ,lowercase_ : int ,lowercase_ : str ,**lowercase_ : List[Any] ): return run_hp_search_sigopt(lowercase_ ,lowercase_ ,lowercase_ ,**lowercase_ ) def __lowerCAmelCase ( self : str ,lowercase_ : Union[str, Any] ): return default_hp_space_sigopt(lowercase_ ) class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "wandb" @staticmethod def __lowerCAmelCase ( ): return is_wandb_available() def __lowerCAmelCase ( self : str ,lowercase_ : Optional[int] ,lowercase_ : int ,lowercase_ : str ,**lowercase_ : List[Any] ): return run_hp_search_wandb(lowercase_ ,lowercase_ ,lowercase_ ,**lowercase_ ) def __lowerCAmelCase ( self : Any ,lowercase_ : Optional[int] ): return default_hp_space_wandb(lowercase_ ) __UpperCamelCase : Dict = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : Tuple = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(A_ ) > 0: lowerCAmelCase__ : Optional[int] = available_backends[0].name if len(A_ ) > 1: logger.info( f'{len(A_ )} hyperparameter search backends available. Using {name} as the default.' 
) return name raise RuntimeError( '''No hyperparameter search backend available.\n''' + '''\n'''.join( f' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
106
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __UpperCamelCase : Optional[Any] = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
106
1
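# A minimal, self-contained sketch of the backend-registry pattern used in the
# record above: each backend declares a name and an availability probe, and a
# module-level helper returns the first installed backend. The class and
# function names below are illustrative stand-ins, not identifiers from the
# record or from transformers.
import importlib.util


class _Backend:
    name = ""
    pip_package = None

    @classmethod
    def is_available(cls):
        # A backend counts as available when its top-level package is importable.
        return importlib.util.find_spec(cls.name) is not None

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class _OptunaBackend(_Backend):
    name = "optuna"


class _RayTuneBackend(_Backend):
    name = "ray"
    pip_package = "'ray[tune]'"


def _default_backend(backends=(_OptunaBackend, _RayTuneBackend)):
    available = [b for b in backends if b.is_available()]
    if available:
        return available[0].name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(f" - To install {b.name} run {b.pip_install()}" for b in backends)
    )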
"""simple docstring""" def __a ( _SCREAMING_SNAKE_CASE ) ->bool: if p < 2: raise ValueError('p should not be less than 2!' ) elif p == 2: return True a__: Optional[Any] = 4 a__: List[str] = (1 << p) - 1 for _ in range(p - 2 ): a__: str = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(11))
203
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def __a ( _SCREAMING_SNAKE_CASE ) ->Tuple: a__: Tuple = {} a__: Tuple = job['started_at'] a__: int = job['completed_at'] a__: Any = date_parser.parse(_SCREAMING_SNAKE_CASE ) a__: Tuple = date_parser.parse(_SCREAMING_SNAKE_CASE ) a__: str = round((end_datetime - start_datetime).total_seconds() / 60.0 ) a__: Any = start a__: Dict = end a__: Optional[int] = duration_in_min return job_info def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]: a__: Tuple = None if token is not None: a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'} a__: int = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json() a__: str = {} try: job_time.update({job['name']: extract_time_from_single_job(_SCREAMING_SNAKE_CASE ) for job in result['jobs']} ) a__: Dict = math.ceil((result['total_count'] - 100) / 100 ) for i in range(_SCREAMING_SNAKE_CASE ): a__: str = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json() job_time.update({job['name']: extract_time_from_single_job(_SCREAMING_SNAKE_CASE ) for job in result['jobs']} ) return job_time except Exception: print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') lowercase__ = parser.parse_args() lowercase__ = get_job_time(args.workflow_run_id) lowercase__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f"{k}: {v['duration']}")
203
1
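# Quick sanity check for the Lucas-Lehmer record above: 2**p - 1 is prime for
# p = 7 (127) and composite for p = 11 (2047 = 23 * 89). The helper below
# re-implements the same recurrence under its textbook names (s, m); it is a
# sketch for illustration, not code taken from the record.
def _lucas_lehmer(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    if p == 2:
        return True  # 2**2 - 1 = 3 is prime
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0


assert _lucas_lehmer(7) is True     # 127 is a Mersenne prime
assert _lucas_lehmer(11) is False   # 2047 = 23 * 89 is composite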
'''simple docstring'''
A__ : Any =[4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ : str =[3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ : str ={
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
    """simple docstring"""
    assert len(str(lowerCAmelCase ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    _lowerCAmelCase = year // 1_00
    _lowerCAmelCase = (5 * (century % 4) + 2) % 7
    _lowerCAmelCase = year % 1_00
    _lowerCAmelCase = centurian % 12
    _lowerCAmelCase = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    _lowerCAmelCase = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    _lowerCAmelCase = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __magic_name__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) 
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: __SCREAMING_SNAKE_CASE = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) __SCREAMING_SNAKE_CASE = value else: __SCREAMING_SNAKE_CASE = value return new_state_dict def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = """""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] __SCREAMING_SNAKE_CASE = in_proj_bias[:256] __SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] __SCREAMING_SNAKE_CASE = in_proj_bias[256:512] __SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] __SCREAMING_SNAKE_CASE = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" ) __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] __SCREAMING_SNAKE_CASE = in_proj_bias[:256] __SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] __SCREAMING_SNAKE_CASE = in_proj_bias[256:512] __SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] __SCREAMING_SNAKE_CASE = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention __SCREAMING_SNAKE_CASE = state_dict.pop( f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) 
__SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) of cross-attention to the state dict __SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:256, :] __SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:256] __SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[256:512, :] __SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[256:512] __SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-256:, :] __SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-256:] def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image.size __SCREAMING_SNAKE_CASE = max(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = 800 if """detection""" in checkpoint_url else 1000 __SCREAMING_SNAKE_CASE = target_max_size / current_max_size __SCREAMING_SNAKE_CASE = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = F.to_tensor(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = F.normalize(UpperCamelCase_ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): logger.info("""Converting model...""" ) # load original state dict __SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="""cpu""" ) # rename keys for src, dest in rename_keys: rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them __SCREAMING_SNAKE_CASE = """model.""" for key in state_dict.copy().keys(): if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val # create HuggingFace model and load state dict __SCREAMING_SNAKE_CASE = TableTransformerConfig( backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: __SCREAMING_SNAKE_CASE = 15 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = {0: """table""", 1: """table rotated"""} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} else: __SCREAMING_SNAKE_CASE = 125 __SCREAMING_SNAKE_CASE = 6 __SCREAMING_SNAKE_CASE = { 0: """table""", 1: """table column""", 2: """table row""", 3: """table column header""", 4: """table projected row header""", 5: """table spanning cell""", } __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = DetrImageProcessor( format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 ) __SCREAMING_SNAKE_CASE = TableTransformerForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() # verify our conversion __SCREAMING_SNAKE_CASE = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png""" __SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , 
filename=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = Image.open(UpperCamelCase_ ).convert("""RGB""" ) __SCREAMING_SNAKE_CASE = normalize(resize(UpperCamelCase_ , UpperCamelCase_ ) ).unsqueeze(0 ) __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ) if "detection" in checkpoint_url: __SCREAMING_SNAKE_CASE = (1, 15, 3) __SCREAMING_SNAKE_CASE = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: __SCREAMING_SNAKE_CASE = (1, 125, 7) __SCREAMING_SNAKE_CASE = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if push_to_hub: # Push model to HF hub logger.info("""Pushing model to the hub...""" ) __SCREAMING_SNAKE_CASE = ( """microsoft/table-transformer-detection""" if """detection""" in checkpoint_url else """microsoft/table-transformer-structure-recognition""" ) model.push_to_hub(UpperCamelCase_ ) image_processor.push_to_hub(UpperCamelCase_ ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __magic_name__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
100
0
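# The record above maps (year, month, day) to a weekday name via the Doomsday
# rule. As an independent reference for the same mapping, the standard library
# confirms that 2017-08-19 fell on a Saturday (note datetime counts Monday as
# 0, while the record's WEEK_DAY_NAMES table starts at Sunday == 0); this is a
# sketch added for illustration, not part of the record.
import datetime

assert datetime.date(2017, 8, 19).weekday() == 5  # Monday == 0, so 5 == Saturday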
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

__a = logging.get_logger(__name__)


def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]


def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
    snake_case__ : str = to_pil_image(_lowerCAmelCase )
    snake_case__ : Optional[int] = pil_image.size
    snake_case__ : int = pytesseract.image_to_data(_lowerCAmelCase , lang=_lowerCAmelCase , output_type="""dict""" , config=_lowerCAmelCase )
    snake_case__ : List[Any] = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]

    # filter empty words and corresponding coordinates
    snake_case__ : Optional[Any] = [idx for idx, word in enumerate(_lowerCAmelCase ) if not word.strip()]
    snake_case__ : Dict = [word for idx, word in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
    snake_case__ : List[str] = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
    snake_case__ : Union[str, Any] = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
    snake_case__ : int = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
    snake_case__ : List[Any] = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    snake_case__ : Tuple = []
    for x, y, w, h in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        snake_case__ : Optional[int] = [x, y, x + w, y + h]
        actual_boxes.append(_lowerCAmelCase )

    # finally, normalize the bounding boxes
    snake_case__ : List[Any] = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) )

    assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class UpperCAmelCase_ ( _a ):
    """simple docstring"""

    lowercase = ["pixel_values"]

    def __init__( self : Tuple , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : bool = True , snake_case_ : float = 1 / 255 , snake_case_ : bool = True , snake_case_ : Union[float, Iterable[float]] = None , snake_case_ : Union[float, Iterable[float]] = None , snake_case_ : bool = True , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = "" , **snake_case_ : Dict , ):
        super().__init__(**snake_case_ )
        snake_case__ : Tuple = size if size is not None else {"""height""": 224, """width""": 224}
        snake_case__ : Union[str, Any] = get_size_dict(snake_case_ )

        snake_case__ : Optional[Any] = do_resize
        snake_case__ : Tuple = size
        snake_case__ : Tuple = resample
        snake_case__ : Any = do_rescale
        snake_case__ : str = rescale_value
        snake_case__ : Union[str, Any] = do_normalize
        snake_case__ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case__ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
        snake_case__ : str = apply_ocr
        snake_case__ : Tuple = ocr_lang
        snake_case__ : Tuple = tesseract_config

    def lowerCamelCase ( self : List[Any] , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Tuple , ):
        snake_case__ : Optional[Any] = get_size_dict(snake_case_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        snake_case__ : str = (size["""height"""], size["""width"""])
        return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )

    def lowerCamelCase ( self : Tuple , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Union[str, Any] , ):
        return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )

    def lowerCamelCase ( self : List[Any] , snake_case_ : np.ndarray , snake_case_ : Union[float, Iterable[float]] , snake_case_ : Union[float, Iterable[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Tuple , ):
        return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )

    def lowerCamelCase ( self : List[str] , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : Union[str, Any]=None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Union[float, Iterable[float]] = None , snake_case_ : Union[float, Iterable[float]] = None , snake_case_ : bool = None , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : Optional[Any] , ):
        snake_case__ : str = do_resize if do_resize is not None else self.do_resize
        snake_case__ : Tuple = size if size is not None else self.size
        snake_case__ : Tuple = get_size_dict(snake_case_ )
        snake_case__ : Optional[Any] = resample if resample is not None else self.resample
        snake_case__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
        snake_case__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        snake_case__ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case__ : Optional[Any] = image_std if image_std is not None else self.image_std
        snake_case__ : List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr
        snake_case__ : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
        snake_case__ : Union[str, Any] = tesseract_config if tesseract_config is not None else self.tesseract_config

        snake_case__ : Dict = make_list_of_images(snake_case_ )

        if not valid_images(snake_case_ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )

        # All transformations expect numpy arrays.
        snake_case__ : List[Any] = [to_numpy_array(snake_case_ ) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , """pytesseract""" )
            snake_case__ : Optional[int] = []
            snake_case__ : Dict = []
            for image in images:
                snake_case__ : List[str] = apply_tesseract(snake_case_ , snake_case_ , snake_case_ )
                words_batch.append(snake_case_ )
                boxes_batch.append(snake_case_ )

        if do_resize:
            snake_case__ : Tuple = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]

        if do_rescale:
            snake_case__ : str = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]

        if do_normalize:
            snake_case__ : Optional[int] = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]

        snake_case__ : List[str] = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]

        snake_case__ : Optional[int] = BatchFeature(data={"""pixel_values""": images} , tensor_type=snake_case_ )

        if apply_ocr:
            snake_case__ : Tuple = words_batch
            snake_case__ : Union[str, Any] = boxes_batch
        return data
368
'''simple docstring'''
import tempfile
import unittest

from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    def lowerCamelCase ( self : int ):
        snake_case__ : List[str] = """hf-internal-testing/tiny-random-t5"""
        snake_case__ : Any = AutoTokenizer.from_pretrained(snake_case_ )
        snake_case__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ )

        snake_case__ : Union[str, Any] = tokenizer("""This is me""" , return_tensors="""pt""" )

        snake_case__ : str = model.to_bettertransformer()

        self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )

        snake_case__ : Optional[int] = model.generate(**snake_case_ )

        snake_case__ : Any = model.reverse_bettertransformer()

        self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(snake_case_ )

            snake_case__ : int = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ )

            self.assertFalse(
                any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )

            snake_case__ : Optional[Any] = model_reloaded.generate(**snake_case_ )
            self.assertTrue(torch.allclose(snake_case_ , snake_case_ ) )

    def lowerCamelCase ( self : List[Any] ):
        snake_case__ : Optional[Any] = """hf-internal-testing/tiny-random-t5"""
        snake_case__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ )
        snake_case__ : int = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(snake_case_ ):
                model.save_pretrained(snake_case_ )

            snake_case__ : int = model.reverse_bettertransformer()
            model.save_pretrained(snake_case_ )
43
0
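# The image-processor record above scales every OCR bounding box to a
# resolution-independent 0-1000 grid (each coordinate divided by the image
# width or height and multiplied by 1000). A minimal standalone version of
# that transform, with illustrative names not taken from the record:
def _normalize_box(box, width, height):
    left, top, right, bottom = box
    return [
        int(1000 * (left / width)),
        int(1000 * (top / height)),
        int(1000 * (right / width)),
        int(1000 * (bottom / height)),
    ]


assert _normalize_box([50, 10, 150, 30], width=200, height=100) == [250, 100, 750, 300]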
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" __lowercase : Optional[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __lowercase : Any = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = TextaTextGenerationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__) return generator, ["Something to write", "Something else"] def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = generator("""Something there""") self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": ANY(lowerCAmelCase__)}]) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""")) __SCREAMING_SNAKE_CASE = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowerCAmelCase__) self.assertEqual( lowerCAmelCase__ , [ [{"""generated_text""": ANY(lowerCAmelCase__)}, {"""generated_text""": ANY(lowerCAmelCase__)}], [{"""generated_text""": ANY(lowerCAmelCase__)}, {"""generated_text""": ANY(lowerCAmelCase__)}], ] , ) __SCREAMING_SNAKE_CASE = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase__) self.assertEqual( lowerCAmelCase__ , [ [{"""generated_text""": ANY(lowerCAmelCase__)}, {"""generated_text""": ANY(lowerCAmelCase__)}], [{"""generated_text""": ANY(lowerCAmelCase__)}, {"""generated_text""": ANY(lowerCAmelCase__)}], ] , ) with self.assertRaises(lowerCAmelCase__): generator(4) @require_torch def snake_case_ ( self): __SCREAMING_SNAKE_CASE = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""") # do_sample=False necessary for reproducibility __SCREAMING_SNAKE_CASE = generator("""Something there""" , do_sample=lowerCAmelCase__) self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": """"""}]) __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = generator( """Something there""" , num_return_sequences=lowerCAmelCase__ , num_beams=lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = generator("""This is a test""" , do_sample=lowerCAmelCase__ , num_return_sequences=2 , return_tensors=lowerCAmelCase__) self.assertEqual( lowerCAmelCase__ , [ {"""generated_token_ids""": ANY(torch.Tensor)}, {"""generated_token_ids""": ANY(torch.Tensor)}, ] , ) __SCREAMING_SNAKE_CASE = generator.model.config.eos_token_id __SCREAMING_SNAKE_CASE = """<pad>""" __SCREAMING_SNAKE_CASE = generator( ["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase__ , ) self.assertEqual( lowerCAmelCase__ , [ [ {"""generated_token_ids""": ANY(torch.Tensor)}, 
{"""generated_token_ids""": ANY(torch.Tensor)}, ], [ {"""generated_token_ids""": ANY(torch.Tensor)}, {"""generated_token_ids""": ANY(torch.Tensor)}, ], ] , ) @require_tf def snake_case_ ( self): __SCREAMING_SNAKE_CASE = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""") # do_sample=False necessary for reproducibility __SCREAMING_SNAKE_CASE = generator("""Something there""" , do_sample=lowerCAmelCase__) self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": """"""}])
100
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
72
0
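# The style_context record above is the classic greedy change-making routine:
# repeatedly subtract the largest denomination that still fits. With the
# canonical Indian coin system the greedy choice is optimal. A quick
# standalone check under illustrative names (not the record's identifiers):
def _find_minimum_change(denominations, value):
    total = int(value)
    answer = []
    for coin in sorted(denominations, reverse=True):
        while total >= coin:
            total -= coin
            answer.append(coin)
    return answer


assert _find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]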
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    __A = True
except ImportError:
    __A = False

__A = logging.get_logger(__name__)  # pylint: disable=invalid-name


def lowerCamelCase_ ( UpperCamelCase__ : Namespace ) -> List[str]:
    """simple docstring"""
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )


class __lowerCAmelCase ( __magic_name__ ):
    """simple docstring"""

    @staticmethod
    def lowercase_ ( lowerCamelCase__ ) -> Union[str, Any]:
        '''simple docstring'''
        __lowerCamelCase = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=lowerCamelCase__ , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=lowerCamelCase__ , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=lowerCamelCase__ )

    def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , *lowerCamelCase__ ) -> List[Any]:
        '''simple docstring'''
        __lowerCamelCase = testing
        __lowerCamelCase = testing_file
        __lowerCamelCase = path

    def lowercase_ ( self ) -> List[Any]:
        '''simple docstring'''
        warnings.warn(
            'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
            'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
            'checks, you should use `transformers-cli add-new-model-like` instead.' )
        if not _has_cookiecutter:
            raise ImportError(
                'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
                'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )

        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        __lowerCamelCase = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(lowerCamelCase__ ) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.' )

        __lowerCamelCase = (
            Path(lowerCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        __lowerCamelCase = path_to_transformer_root / 'templates' / 'adding_a_new_model'

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(lowerCamelCase__ ) )
        else:
            with open(self._testing_file , 'r' ) as configuration_file:
                __lowerCamelCase = json.load(lowerCamelCase__ )

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,
                no_input=lowerCamelCase__ ,
                extra_context=lowerCamelCase__ , )

        __lowerCamelCase = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]

        # Retrieve configuration
        with open(directory + '/configuration.json' , 'r' ) as configuration_file:
            __lowerCamelCase = json.load(lowerCamelCase__ )

        __lowerCamelCase = configuration['lowercase_modelname']
        __lowerCamelCase = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f"""{directory}/configuration.json""" )

        __lowerCamelCase = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        __lowerCamelCase = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        __lowerCamelCase = 'Flax' in generate_tensorflow_pytorch_and_flax

        __lowerCamelCase = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=lowerCamelCase__ )

        # Tests require submodules as they have parent imports
        with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w' ):
            pass

        shutil.move(
            f"""{directory}/__init__.py""" ,
            f"""{model_dir}/__init__.py""" , )
        shutil.move(
            f"""{directory}/configuration_{lowercase_model_name}.py""" ,
            f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )

        def remove_copy_lines(lowerCamelCase__ ):
            with open(lowerCamelCase__ , 'r' ) as f:
                __lowerCamelCase = f.readlines()
            with open(lowerCamelCase__ , 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(lowerCamelCase__ )

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )

            shutil.move(
                f"""{directory}/modeling_{lowercase_model_name}.py""" ,
                f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
            shutil.move(
                f"""{directory}/test_modeling_{lowercase_model_name}.py""" ,
                f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
        else:
            os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
            os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )

            shutil.move(
                f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ,
                f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
            shutil.move(
                f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" ,
                f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
        else:
            os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
            os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )

            shutil.move(
                f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ,
                f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
            shutil.move(
                f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" ,
                f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
        else:
            os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
            os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )

        shutil.move(
            f"""{directory}/{lowercase_model_name}.md""" ,
            f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
        shutil.move(
            f"""{directory}/tokenization_{lowercase_model_name}.py""" ,
            f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
        shutil.move(
            f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" ,
            f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
            # Create temp file
            __lowerCamelCase , __lowerCamelCase = mkstemp()
            __lowerCamelCase = False
            with fdopen(lowerCamelCase__ , 'w' ) as new_file:
                with open(lowerCamelCase__ ) as old_file:
                    for line in old_file:
                        new_file.write(lowerCamelCase__ )
                        if line_to_copy_below in line:
                            __lowerCamelCase = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(lowerCamelCase__ )

            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )

            # Copy the file permissions from the old file to the new file
            copymode(lowerCamelCase__ , lowerCamelCase__ )
            # Remove original file
            remove(lowerCamelCase__ )
            # Move new file
            move(lowerCamelCase__ , lowerCamelCase__ )

        def skip_units(lowerCamelCase__ ):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(lowerCamelCase__ ):
            with open(lowerCamelCase__ ) as datafile:
                __lowerCamelCase = []
                __lowerCamelCase = False
                __lowerCamelCase = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        __lowerCamelCase = line.split('"' )[1]
                        __lowerCamelCase = skip_units(lowerCamelCase__ )
                    elif "# Below: " in line and "##" not in line:
                        __lowerCamelCase = line.split('"' )[1]
                        __lowerCamelCase = skip_units(lowerCamelCase__ )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

                        __lowerCamelCase = []
                    elif "# Replace with" in line and "##" not in line:
                        __lowerCamelCase = []
                    elif "##" not in line:
                        lines_to_copy.append(lowerCamelCase__ )

            remove(lowerCamelCase__ )

        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(lowerCamelCase__ )
348
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class __lowerCAmelCase :
    """simple docstring"""

    def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
        '''simple docstring'''
        __lowerCamelCase = parent
        __lowerCamelCase = batch_size
        __lowerCamelCase = seq_length
        __lowerCamelCase = is_training
        __lowerCamelCase = use_input_mask
        __lowerCamelCase = use_token_type_ids
        __lowerCamelCase = use_labels
        __lowerCamelCase = vocab_size
        __lowerCamelCase = hidden_size
        __lowerCamelCase = rotary_dim
        __lowerCamelCase = num_hidden_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = intermediate_size
        __lowerCamelCase = hidden_act
        __lowerCamelCase = hidden_dropout_prob
        __lowerCamelCase = attention_probs_dropout_prob
        __lowerCamelCase = max_position_embeddings
        __lowerCamelCase = initializer_range
        __lowerCamelCase = None
        __lowerCamelCase = vocab_size - 1
        __lowerCamelCase = vocab_size - 1
        __lowerCamelCase = vocab_size - 1

    def lowercase_ ( self ) -> List[Any]:
        '''simple docstring'''
        __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __lowerCamelCase = None
        if self.use_input_mask:
            __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )

        __lowerCamelCase = GPTJConfig(
            vocab_size=self.vocab_size ,
            n_embd=self.hidden_size ,
            n_layer=self.num_hidden_layers ,
            n_head=self.num_attention_heads ,
            n_positions=self.max_position_embeddings ,
            use_cache=lowerCamelCase__ ,
            bos_token_id=self.bos_token_id ,
            eos_token_id=self.eos_token_id ,
            pad_token_id=self.pad_token_id ,
            rotary_dim=self.rotary_dim , )

        return (config, input_ids, input_mask)

    def lowercase_ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        __lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
        __lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
        '''simple docstring'''
        __lowerCamelCase = 20
        __lowerCamelCase = model_class_name(lowerCamelCase__ )

        __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
        __lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )

        __lowerCamelCase = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        __lowerCamelCase = model(
            input_ids[:, :-1] ,
            attention_mask=lowerCamelCase__ ,
            past_key_values=lowerCamelCase__ ,
            position_ids=lowerCamelCase__ , )

        __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
        __lowerCamelCase = model(
            input_ids[:, -1:] ,
            attention_mask=lowerCamelCase__ ,
            past_key_values=outputs_cache.past_key_values ,
            position_ids=lowerCamelCase__ , )

        __lowerCamelCase = model(lowerCamelCase__ )

        __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )

    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
        '''simple docstring'''
        __lowerCamelCase = 20
        __lowerCamelCase = model_class_name(lowerCamelCase__ )

        __lowerCamelCase = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] ,
            axis=-1 , )

        __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
        __lowerCamelCase = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )

        __lowerCamelCase = model(
            input_ids[:, :-1] ,
            attention_mask=lowerCamelCase__ ,
            past_key_values=lowerCamelCase__ ,
            position_ids=lowerCamelCase__ , )
        __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
        __lowerCamelCase = model(
            input_ids[:, -1:] ,
            past_key_values=outputs_cache.past_key_values ,
            attention_mask=lowerCamelCase__ ,
            position_ids=lowerCamelCase__ , )

        __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )

        __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )


@require_flax
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    """simple docstring"""

    snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def lowercase_ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        __lowerCamelCase = FlaxGPTJModelTester(self )

    def lowercase_ ( self ) -> str:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    def lowercase_ ( self ) -> Optional[int]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    @tooslow
    def lowercase_ ( self ) -> str:
        '''simple docstring'''
        __lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
        __lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )

        __lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )

        __lowerCamelCase = False
        __lowerCamelCase = model.config.eos_token_id
        __lowerCamelCase = jax.jit(model.generate )

        __lowerCamelCase = jit_generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences

        __lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )

        __lowerCamelCase = [
            'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
            'Hey, I\'m a little late to the party. I\'m going to',
        ]

        self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )

    @is_pt_flax_cross_test
    def lowercase_ ( self ) -> Any:
        '''simple docstring'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
                __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                __lowerCamelCase = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )

                __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
                __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(lowerCamelCase__ ):
                    __lowerCamelCase = 0
                    __lowerCamelCase = 1
                    __lowerCamelCase = 0
                    __lowerCamelCase = 1
                __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
                __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )

                __lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
                __lowerCamelCase = fx_state

                with torch.no_grad():
                    __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()

                __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
                self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(lowerCamelCase__ )
                    __lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )

                __lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
                self.assertEqual(
                    len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @is_pt_flax_cross_test
    def lowercase_ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
                __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                __lowerCamelCase = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )

                __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
                __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )

                __lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
                __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
                __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(lowerCamelCase__ ):
                    __lowerCamelCase = 0
                    __lowerCamelCase = 1
                    __lowerCamelCase = 0
                    __lowerCamelCase = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()

                __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
                self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(lowerCamelCase__ )
                    __lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )

                with torch.no_grad():
                    __lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()

                self.assertEqual(
                    len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @tooslow
    def lowercase_ ( self ) -> List[str]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            __lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
            __lowerCamelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowerCamelCase__ )
348
1
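# The `replace` helper in the add-new-model record above uses the classic
# safe-rewrite pattern: write to a temp file, copy the original's permissions,
# remove the original, then move the temp file into place. A compact
# standalone sketch of that pattern; function and variable names here are
# hypothetical, not taken from the record:
import os
import shutil
import tempfile


def _insert_below(path, marker, new_lines):
    # Create a temp file and stream the original through it, copying new_lines
    # in below the first line that contains `marker`.
    fd, tmp_path = tempfile.mkstemp()
    found = False
    with os.fdopen(fd, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line)
            if marker in line:
                found = True
                new_file.writelines(new_lines)
    if not found:
        raise ValueError(f"Line {marker} was not found in file.")
    # Preserve permissions, then atomically swap the rewritten file in.
    shutil.copymode(path, tmp_path)
    os.remove(path)
    shutil.move(tmp_path, path)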
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowercase ( _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = ShapEImgaImgPipeline _SCREAMING_SNAKE_CASE = ['image'] _SCREAMING_SNAKE_CASE = ['image'] _SCREAMING_SNAKE_CASE = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] _SCREAMING_SNAKE_CASE = False @property def _snake_case ( self ) -> int: return 32 @property def _snake_case ( self ) -> Union[str, Any]: return 32 @property def _snake_case ( self ) -> List[str]: return self.time_input_dim * 4 @property def _snake_case ( self ) -> int: return 8 @property def _snake_case ( self ) -> List[Any]: torch.manual_seed(0 ) lowerCAmelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowerCAmelCase = CLIPVisionModel(lowercase ) return model @property def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = CLIPImageProcessor( crop_size=224 , do_center_crop=lowercase , do_normalize=lowercase , do_resize=lowercase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , ) return image_processor @property def _snake_case ( self ) -> Any: torch.manual_seed(0 ) lowerCAmelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """embedding_proj_norm_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } lowerCAmelCase = PriorTransformer(**lowercase ) return model @property def _snake_case ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } lowerCAmelCase = ShapERenderer(**lowercase ) return model def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = self.dummy_prior lowerCAmelCase = self.dummy_image_encoder lowerCAmelCase = self.dummy_image_processor lowerCAmelCase = self.dummy_renderer lowerCAmelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=lowercase , clip_sample=lowercase , clip_sample_range=1.0 , ) lowerCAmelCase = { """prior""": prior, """image_encoder""": image_encoder, """image_processor""": image_processor, """renderer""": renderer, """scheduler""": scheduler, } return components def _snake_case ( self , 
lowercase , lowercase=0 ) -> int: lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase ) if str(lowercase ).startswith("""mps""" ): lowerCAmelCase = torch.manual_seed(lowercase ) else: lowerCAmelCase = torch.Generator(device=lowercase ).manual_seed(lowercase ) lowerCAmelCase = { """image""": input_image, """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def _snake_case ( self ) -> List[Any]: lowerCAmelCase = """cpu""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**lowercase ) lowerCAmelCase = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) lowerCAmelCase = pipe(**self.get_dummy_inputs(lowercase ) ) lowerCAmelCase = output.images[0] lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowerCAmelCase = np.array( [ 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ) -> Union[str, Any]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _snake_case ( self ) -> int: lowerCAmelCase = torch_device == """cpu""" lowerCAmelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowercase , relax_max_difference=lowercase , ) def _snake_case ( self ) -> List[Any]: lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**lowercase ) lowerCAmelCase = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) lowerCAmelCase = 1 lowerCAmelCase = 2 lowerCAmelCase = self.get_dummy_inputs(lowercase ) for key in inputs.keys(): if key in self.batch_params: lowerCAmelCase = batch_size * [inputs[key]] lowerCAmelCase = pipe(**lowercase , num_images_per_prompt=lowercase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowercase ( unittest.TestCase ): def _snake_case ( self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" ) lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_img2img_out.npy""" ) lowerCAmelCase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" ) lowerCAmelCase = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) lowerCAmelCase = torch.Generator(device=lowercase ).manual_seed(0 ) lowerCAmelCase = pipe( lowercase , generator=lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowercase , lowercase )
46
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } SCREAMING_SNAKE_CASE__ = { "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__( self , lowercase , lowercase=False , lowercase=False , lowercase=False , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase = None , **lowercase , ) -> None: lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase = unk_token if pad_token is None else pad_token lowerCAmelCase = eos_token if bos_token is None else bos_token else: lowerCAmelCase = """<pad>""" if pad_token is None else pad_token lowerCAmelCase = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , ) lowerCAmelCase = do_lower_case lowerCAmelCase = remove_space lowerCAmelCase = keep_accents lowerCAmelCase = vocab_file lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing lowerCAmelCase = re.compile( f'[{"".join(map(lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' ) def __getstate__( self ) -> Optional[int]: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , lowercase ) -> str: lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _snake_case ( self ) -> int: return len(self.sp_model ) def _snake_case ( self , lowercase ) -> str: lowerCAmelCase = self.non_printing_characters_re.sub("""""" , lowercase ) # Normalize whitespaces lowerCAmelCase = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase = unicodedata.normalize("""NFC""" , lowercase ) return text def _snake_case ( self , lowercase , **lowercase ) -> List[str]: lowerCAmelCase = self.preprocess_text(lowercase ) return self.sp_model.encode(lowercase , out_type=lowercase ) def _snake_case ( self , lowercase ) -> int: return self.sp_model.PieceToId(lowercase ) def _snake_case ( self , lowercase ) -> str: return self.sp_model.IdToPiece(lowercase ) @staticmethod def _snake_case ( lowercase ) -> str: return out_string def _snake_case ( self , lowercase ) -> str: lowerCAmelCase = [] lowerCAmelCase = """""" lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowercase ) + token lowerCAmelCase = True lowerCAmelCase = [] else: current_sub_tokens.append(lowercase ) lowerCAmelCase = False out_string += self.sp_model.decode(lowercase ) return out_string def _snake_case ( self ) -> Dict[str, int]: lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]: if not os.path.isdir(lowercase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase ) elif not os.path.isfile(self.vocab_file ): with open(lowercase , """wb""" ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(lowercase ) return (out_vocab_file,) def _snake_case ( self , lowercase , lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(lowercase , lowercase ): lowerCAmelCase = self.preprocess_text(lowercase ) lowerCAmelCase = self.sp_model.encode(lowercase ) else: lowerCAmelCase = [self.preprocess_text(lowercase ) for t in text] lowerCAmelCase = self.sp_model.encode(lowercase ) if return_tensors is True or return_tensors == "pt": lowerCAmelCase = torch.tensor(lowercase ) return token_ids def _snake_case ( self , lowercase ) -> str: return 
self.sp_model.decode(lowercase ) def _snake_case ( self , lowercase ) -> List[int]: lowerCAmelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()] lowerCAmelCase = ( f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowercase ) + f'{self.bos_token}Bot:' ) return self.encode(text=lowercase )
46
1
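For reference, the GPT-SW3 tokenizer serving as style context in the row above follows the standard PreTrainedTokenizer API. A minimal usage sketch, assuming sentencepiece is installed and the checkpoint (named per the pretrained-vocab map in the sample) is accessible on the Hub; the example sentence is illustrative:

from transformers import GPTSw3Tokenizer

# Checkpoint name taken from the vocab map above; Hub access may require
# accepting the model license.
tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Svenska är roligt!")["input_ids"]
print(tokenizer.decode(ids))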
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __a : def __init__( self , a__ , a__=3 , a__=32 , a__=3 , a__=10 , a__=[10, 20, 30, 40] , a__=[1, 1, 2, 1] , a__=True , a__=True , a__="relu" , a__=3 , a__=None , ): _lowerCamelCase = parent _lowerCamelCase = batch_size _lowerCamelCase = image_size _lowerCamelCase = num_channels _lowerCamelCase = embeddings_size _lowerCamelCase = hidden_sizes _lowerCamelCase = depths _lowerCamelCase = is_training _lowerCamelCase = use_labels _lowerCamelCase = hidden_act _lowerCamelCase = num_labels _lowerCamelCase = scope _lowerCamelCase = len(__UpperCAmelCase ) def snake_case_ ( self ): _lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase = None if self.use_labels: _lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) _lowerCamelCase = self.get_config() return config, pixel_values, labels def snake_case_ ( self ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def snake_case_ ( self , a__ , a__ , a__ ): _lowerCamelCase = TFRegNetModel(config=__UpperCAmelCase ) _lowerCamelCase = model(__UpperCAmelCase , training=__UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def snake_case_ ( self , a__ , a__ , a__ ): _lowerCamelCase = self.num_labels _lowerCamelCase = TFRegNetForImageClassification(__UpperCAmelCase ) _lowerCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_ ( self ): _lowerCamelCase = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs _lowerCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __a ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE__ : Any = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Optional[int] = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : str = False def snake_case_ ( self ): _lowerCamelCase = TFRegNetModelTester(self ) _lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def snake_case_ ( self ): return 
@unittest.skip(reason='RegNet does not use inputs_embeds' ) def snake_case_ ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def snake_case_ ( self ): super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def snake_case_ ( self ): pass def snake_case_ ( self ): _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase = model_class(__UpperCAmelCase ) _lowerCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase = [*signature.parameters.keys()] _lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def snake_case_ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def snake_case_ ( self ): def check_hidden_states_output(a__ , a__ , a__ ): _lowerCamelCase = model_class(__UpperCAmelCase ) _lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase ) _lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCamelCase = self.model_tester.num_stages self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _lowerCamelCase = layer_type _lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case_ ( self ): _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(a__ , a__ , a__ , a__={} ): _lowerCamelCase = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ) _lowerCamelCase = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ).to_tuple() def recursive_check(a__ , a__ ): if isinstance(__UpperCAmelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__UpperCAmelCase , __UpperCAmelCase ): recursive_check(__UpperCAmelCase , __UpperCAmelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__UpperCAmelCase , __UpperCAmelCase ) ) , msg=( 'Tuple and dict output are not equal. 
Difference:' F' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}' ) , ) recursive_check(__UpperCAmelCase , __UpperCAmelCase ) for model_class in self.all_model_classes: _lowerCamelCase = model_class(__UpperCAmelCase ) _lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) _lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) _lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) _lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {'output_hidden_states': True} ) _lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) _lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {'output_hidden_states': True} ) def snake_case_ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def snake_case_ ( self ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase = TFRegNetModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( )-> List[str]: _lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __a ( unittest.TestCase ): @cached_property def snake_case_ ( self ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case_ ( self ): _lowerCamelCase = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowerCamelCase = self.default_image_processor _lowerCamelCase = prepare_img() _lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='tf' ) # forward pass _lowerCamelCase = model(**__UpperCAmelCase , training=__UpperCAmelCase ) # verify the logits _lowerCamelCase = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _lowerCamelCase = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 )
354
"""simple docstring""" def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> list[int]: if num <= 0: raise ValueError('Input must be a positive integer' ) _lowerCamelCase = [True] * (num + 1) _lowerCamelCase = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , snake_case ): _lowerCamelCase = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[int] =int(input("""Enter a positive integer: """).strip()) print(prime_sieve_eratosthenes(user_num))
80
0
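The TF RegNet integration test in the row above reduces to the following inference sketch. The checkpoint is whatever heads the pretrained archive list (an assumption; the test itself indexes [0]), and the COCO fixture path is the one the test uses:

import tensorflow as tf
from PIL import Image
from transformers import (
    TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST,
    AutoImageProcessor,
    TFRegNetForImageClassification,
)

checkpoint = TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]
image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = TFRegNetForImageClassification.from_pretrained(checkpoint)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits  # shape (1, 1000)
print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted ImageNet class index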
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def _snake_case ( *UpperCamelCase : str , UpperCamelCase : Optional[Union[Dict, Any]] = None , UpperCamelCase : Tuple=True , UpperCamelCase : Optional[int]=2 ): from .. import __version__ UpperCAmelCase : Tuple = take_from UpperCAmelCase : Optional[Any] = () if not isinstance(args[0] , UpperCamelCase ): UpperCAmelCase : List[str] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(UpperCamelCase ).base_version ) >= version.parse(UpperCamelCase ): raise ValueError( F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" F" version {__version__} is >= {version_name}" ) UpperCAmelCase : Optional[int] = None if isinstance(UpperCamelCase , UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(UpperCamelCase ),) UpperCAmelCase : List[str] = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(UpperCamelCase , UpperCamelCase ): values += (getattr(UpperCamelCase , UpperCamelCase ),) UpperCAmelCase : List[Any] = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: UpperCAmelCase : int = F"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: UpperCAmelCase : Optional[Any] = warning + """ """ if standard_warn else """""" warnings.warn(warning + message , UpperCamelCase , stacklevel=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) > 0: UpperCAmelCase : Optional[int] = inspect.getouterframes(inspect.currentframe() )[1] UpperCAmelCase : Union[str, Any] = call_frame.filename UpperCAmelCase : List[Any] = call_frame.lineno UpperCAmelCase : List[str] = call_frame.function UpperCAmelCase , UpperCAmelCase : Optional[int] = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" ) if len(UpperCamelCase ) == 0: return elif len(UpperCamelCase ) == 1: return values[0] return values
109
0
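A hypothetical call site for the deprecate helper above; the function and kwarg names are illustrative, not part of diffusers:

def scale_latents(latents, scaling_factor=0.18215, **kwargs):
    # Accept the old `scale` kwarg for one more release: deprecate() pops it
    # from kwargs, emits a FutureWarning, and returns the popped value
    # (or None if the caller did not pass it).
    scale = deprecate("scale", "1.0.0", "Use `scaling_factor` instead.", take_from=kwargs)
    if scale is not None:
        scaling_factor = scale
    return latents * scaling_factor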
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__(self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Dict=7 , snake_case__ : int=3 , snake_case__ : str=30 , snake_case__ : Dict=4_00 , snake_case__ : Any=True , snake_case__ : Tuple=None , snake_case__ : Dict=True , snake_case__ : Dict=[0.5, 0.5, 0.5] , snake_case__ : Union[str, Any]=[0.5, 0.5, 0.5] , snake_case__ : int=True , snake_case__ : Optional[Any]=1 / 2_55 , snake_case__ : Optional[Any]=True , ) -> str: '''simple docstring''' snake_case : Dict = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} snake_case : List[Any] = parent snake_case : Dict = batch_size snake_case : Optional[Any] = num_channels snake_case : Union[str, Any] = min_resolution snake_case : Dict = max_resolution snake_case : Dict = do_resize snake_case : List[str] = size snake_case : Dict = do_normalize snake_case : Any = image_mean snake_case : Tuple = image_std snake_case : int = do_rescale snake_case : List[Any] = rescale_factor snake_case : Any = do_pad def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Tuple , snake_case__ : Any=False ) -> Tuple: '''simple docstring''' if not batched: snake_case : Tuple = image_inputs[0] if isinstance(snake_case__ , Image.Image ): snake_case : List[Any] = image.size else: snake_case : Any = image.shape[1], image.shape[2] if w < h: snake_case : Optional[Any] = int(self.size["shortest_edge"] * h / w ) snake_case : Optional[Any] = self.size["shortest_edge"] elif w > h: snake_case : List[str] = self.size["shortest_edge"] snake_case : Dict = int(self.size["shortest_edge"] * w / h ) else: snake_case : List[Any] = self.size["shortest_edge"] snake_case : List[str] = self.size["shortest_edge"] else: snake_case : List[str] = [] for image in image_inputs: snake_case : List[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case : Tuple = max(snake_case__ , key=lambda snake_case__ : item[0] )[0] snake_case : Tuple = max(snake_case__ , key=lambda snake_case__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase ( A_ ,unittest.TestCase ): A__ : str = DetaImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]: '''simple docstring''' snake_case : Optional[Any] = DetaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any: '''simple docstring''' snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "image_mean" ) ) 
self.assertTrue(hasattr(snake_case__ , "image_std" ) ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_resize" ) ) self.assertTrue(hasattr(snake_case__ , "do_rescale" ) ) self.assertTrue(hasattr(snake_case__ , "do_pad" ) ) self.assertTrue(hasattr(snake_case__ , "size" ) ) def _SCREAMING_SNAKE_CASE (self : str ) -> Any: '''simple docstring''' snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} ) self.assertEqual(image_processor.do_pad , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]: '''simple docstring''' snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case : str = self.image_processor_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case : Dict = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ ) snake_case : int = image_processing(snake_case__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Tuple: '''simple docstring''' snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) # Test not batched input snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case : Tuple = image_processing(snake_case__ , return_tensors="pt" ).pixel_values snake_case : Optional[int] = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE (self : str ) -> int: '''simple docstring''' snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case : List[str] = self.image_processor_tester.get_expected_values(snake_case__ ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case : List[Any] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values snake_case : str = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any: '''simple docstring''' snake_case : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case : Tuple = json.loads(f.read() ) snake_case : int = {"image_id": 3_97_69, "annotations": target} # encode them snake_case : Union[str, Any] = DetaImageProcessor() snake_case : Optional[int] = image_processing(images=snake_case__ , annotations=snake_case__ , return_tensors="pt" ) # verify pixel values snake_case : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , snake_case__ ) snake_case : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case__ , atol=1e-4 ) ) # verify area snake_case : List[str] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case__ ) ) # verify boxes snake_case : Any = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case__ ) snake_case : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case__ , atol=1e-3 ) ) # verify image_id snake_case : Union[str, Any] = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case__ ) ) # verify is_crowd snake_case : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case__ ) ) # verify class_labels snake_case : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case__ ) ) # verify orig_size snake_case : Dict = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case__ ) ) # verify size snake_case : Dict = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case__ ) ) @slow def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[Any]: '''simple docstring''' snake_case : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case : List[Any] = json.loads(f.read() ) snake_case : Any = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} snake_case : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case : Tuple = DetaImageProcessor(format="coco_panoptic" ) snake_case : int = image_processing(images=snake_case__ , annotations=snake_case__ , masks_path=snake_case__ , return_tensors="pt" ) # verify pixel values snake_case : int = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , snake_case__ ) snake_case : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] 
) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case__ , atol=1e-4 ) ) # verify area snake_case : str = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case__ ) ) # verify boxes snake_case : Any = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case__ ) snake_case : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case__ , atol=1e-3 ) ) # verify image_id snake_case : List[Any] = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case__ ) ) # verify is_crowd snake_case : int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case__ ) ) # verify class_labels snake_case : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case__ ) ) # verify masks snake_case : List[str] = 82_28_73 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , snake_case__ ) # verify orig_size snake_case : Optional[int] = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case__ ) ) # verify size snake_case : int = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case__ ) )
363
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
0
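Stripped of its assertions, the DETA integration test in the row above corresponds to this preprocessing sketch; paths and expected shapes are the ones asserted in the test:

import json

from PIL import Image
from transformers import DetaImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
    target = {"image_id": 39769, "annotations": json.loads(f.read())}

image_processing = DetaImageProcessor()
encoding = image_processing(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)        # torch.Size([1, 3, 800, 1066])
print(encoding["labels"][0]["boxes"].shape)  # torch.Size([6, 4])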
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __lowercase ( snake_case_ : Optional[int] ,snake_case_ : str=0.999 ,snake_case_ : str="cosine" ,) ->List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case_ : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case_ : List[str] ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) __A : List[Any] = [] for i in range(snake_case_ ): __A : List[Any] = i / num_diffusion_timesteps __A : Any = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) ,snake_case_ ) ) return torch.tensor(snake_case_ ,dtype=torch.floataa ) class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = [e.name for e in KarrasDiffusionSchedulers] _lowerCamelCase = 2 @register_to_config def __init__( self , __lowerCamelCase = 1000 , __lowerCamelCase = 0.0_0_0_8_5 , __lowerCamelCase = 0.0_1_2 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "epsilon" , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = 1.0 , __lowerCamelCase = "linspace" , __lowerCamelCase = 0 , ): '''simple docstring''' if trained_betas is not None: __A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": __A : str = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __A : Tuple = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __A : Optional[Any] = betas_for_alpha_bar(__lowerCamelCase , alpha_transform_type='''cosine''' ) elif beta_schedule == "exp": __A : Tuple = betas_for_alpha_bar(__lowerCamelCase , alpha_transform_type='''exp''' ) else: raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" ) __A : List[Any] = 1.0 - self.betas __A : Any = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) __A : Optional[Any] = use_karras_sigmas def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=None ): '''simple docstring''' if schedule_timesteps is None: __A : Union[str, Any] = self.timesteps __A : Optional[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __A : Optional[Any] = 1 if len(__lowerCamelCase ) > 1 else 0 else: __A : str = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep __A : Union[str, Any] = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCamelCase__( self ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , ): '''simple docstring''' __A : Optional[int] = self.index_for_timestep(__lowerCamelCase ) __A : Optional[Any] = self.sigmas[step_index] __A : int = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , ): '''simple docstring''' __A : Any = num_inference_steps __A : Any = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __A : List[Any] = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": __A : Any = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __A : Optional[int] = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __A : int = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __A : Any = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase ) timesteps -= 1 else: raise ValueError( F"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) __A : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __A : List[Any] = np.log(__lowerCamelCase ) __A : Dict = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase ) if self.config.use_karras_sigmas: __A : int = self._convert_to_karras(in_sigmas=__lowerCamelCase , num_inference_steps=self.num_inference_steps ) __A : Optional[int] = np.array([self._sigma_to_t(__lowerCamelCase , __lowerCamelCase ) for sigma in sigmas] ) __A : Any = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __A : Optional[Any] = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase ) __A : List[str] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) __A : Any = torch.from_numpy(__lowerCamelCase ) __A : Dict = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(__lowerCamelCase ).startswith('''mps''' ): # mps does not support float64 __A : Optional[int] = timesteps.to(__lowerCamelCase , dtype=torch.floataa ) else: __A : int = timesteps.to(device=__lowerCamelCase ) # empty dt and derivative __A : str = None __A : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __A : List[Any] = defaultdict(__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : int = np.log(__lowerCamelCase ) # get distribution __A : int = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range __A : List[Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) __A : Union[str, Any] = low_idx + 1 __A : Any = log_sigmas[low_idx] __A : str = log_sigmas[high_idx] # interpolate sigmas __A : Optional[int] = (low - log_sigma) / (low - high) __A : Union[str, Any] = np.clip(__lowerCamelCase , 0 , 1 ) # transform interpolation to time range __A : Tuple = (1 - w) * low_idx + w * high_idx __A : Tuple = t.reshape(sigma.shape ) return t def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : float = in_sigmas[-1].item() __A : float = in_sigmas[0].item() __A : Optional[Any] = 7.0 # 7.0 is the value used in the paper __A : Optional[int] = np.linspace(0 , 1 , __lowerCamelCase ) __A : str = sigma_min ** (1 / rho) __A : Optional[int] = sigma_max ** (1 / rho) __A : List[str] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def UpperCamelCase__( self ): '''simple docstring''' return self.dt is None def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True , ): '''simple docstring''' __A : List[Any] = self.index_for_timestep(__lowerCamelCase ) # advance index counter by 1 __A : str = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __A : Union[str, Any] = self.sigmas[step_index] __A : Dict = self.sigmas[step_index + 1] else: # 2nd order / Heun's method __A : str = self.sigmas[step_index - 1] __A : Any = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __A : List[str] = 0 __A : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __A : List[str] = sigma_hat if self.state_in_first_order else sigma_next __A : int = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __A : Tuple = sigma_hat if self.state_in_first_order else sigma_next __A : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": __A : str = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: __A : List[str] = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __A : List[Any] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __A : Union[str, Any] = sigma_next - sigma_hat # store for 2nd order step __A : Dict = derivative __A : Optional[int] = dt __A : int = sample else: # 2. 2nd order / Heun's method __A : Tuple = (sample - pred_original_sample) / sigma_next __A : Tuple = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample __A : int = self.dt __A : int = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" __A : List[str] = None __A : Union[str, Any] = None __A : Any = None __A : Optional[Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ): '''simple docstring''' __A : List[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ): # mps does not support float64 __A : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __A : int = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __A : List[Any] = self.timesteps.to(original_samples.device ) __A : Tuple = timesteps.to(original_samples.device ) __A : Union[str, Any] = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase ) for t in timesteps] __A : Dict = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __A : List[Any] = sigma.unsqueeze(-1 ) __A : Optional[Any] = original_samples + noise * sigma return noisy_samples def __len__( self ): '''simple docstring''' return self.config.num_train_timesteps
179
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class __snake_case : """simple docstring""" _lowerCamelCase = 42 # setable values _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = None @classmethod def UpperCamelCase__( cls , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' return cls(common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase ) @dataclass class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = 42 class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers] _lowerCamelCase = 42 @property def UpperCamelCase__( self ): '''simple docstring''' return True @register_to_config def __init__( self , __lowerCamelCase = 1000 , __lowerCamelCase = 0.0_0_0_1 , __lowerCamelCase = 0.0_2 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "fixed_small" , __lowerCamelCase = True , __lowerCamelCase = "epsilon" , __lowerCamelCase = jnp.floataa , ): '''simple docstring''' __A : Tuple = dtype def UpperCamelCase__( self , __lowerCamelCase = None ): '''simple docstring''' if common is None: __A : Tuple = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution __A : Tuple = jnp.array(1.0 , dtype=self.dtype ) __A : Optional[int] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase , ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None ): '''simple docstring''' return sample def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = () ): '''simple docstring''' __A : Optional[Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 __A : Optional[Any] = (jnp.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase , ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ): '''simple docstring''' __A : int = state.common.alphas_cumprod[t] __A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample __A : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: __A : Dict = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": __A : List[Any] = jnp.clip(__lowerCamelCase , a_min=1e-2_0 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == 
"fixed_small_log": __A : Optional[Any] = jnp.log(jnp.clip(__lowerCamelCase , a_min=1e-2_0 ) ) elif variance_type == "fixed_large": __A : Tuple = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log __A : Union[str, Any] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": __A : Optional[Any] = variance __A : Optional[Any] = state.common.betas[t] __A : Any = (predicted_variance + 1) / 2 __A : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True , ): '''simple docstring''' __A : Optional[int] = timestep if key is None: __A : List[Any] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: __A , __A : Tuple = jnp.split(__lowerCamelCase , sample.shape[1] , axis=1 ) else: __A : List[str] = None # 1. compute alphas, betas __A : Dict = state.common.alphas_cumprod[t] __A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) __A : Tuple = 1 - alpha_prod_t __A : Optional[int] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": __A : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": __A : Any = model_output elif self.config.prediction_type == "v_prediction": __A : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """ ''' for the FlaxDDPMScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: __A : str = jnp.clip(__lowerCamelCase , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __A : Optional[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t __A : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __A : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): __A : List[Any] = jax.random.split(__lowerCamelCase , num=1 ) __A : List[str] = jax.random.normal(__lowerCamelCase , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(__lowerCamelCase , __lowerCamelCase , predicted_variance=__lowerCamelCase ) ** 0.5) * noise __A : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) __A : Any = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=__lowerCamelCase , state=__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ): '''simple docstring''' return add_noise_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ): '''simple docstring''' return get_velocity_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def __len__( self ): '''simple docstring''' return self.config.num_train_timesteps
179
1
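The two scheduler files above (the Heun sampler for PyTorch, the DDPM scheduler for Flax) are normally consumed through a pipeline rather than instantiated by hand. A sketch of swapping diffusers' HeunDiscreteScheduler into an existing pipeline, assuming a Stable Diffusion checkpoint is available; the checkpoint name and prompt are illustrative:

from diffusers import DiffusionPipeline, HeunDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Rebuild the scheduler from the pipeline's existing config so the beta
# schedule, timestep count, etc. carry over unchanged.
pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("a photo of an astronaut riding a horse", num_inference_steps=30).images[0]
image.save("astronaut.png")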
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
158
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self: Dict , _lowerCAmelCase: Dict , _lowerCAmelCase: Any=13 , _lowerCAmelCase: List[str]=10 , _lowerCAmelCase: Dict=3 , _lowerCAmelCase: Dict=2 , _lowerCAmelCase: Any=2 , _lowerCAmelCase: Any=2 , _lowerCAmelCase: Union[str, Any]=True , _lowerCAmelCase: Dict=True , _lowerCAmelCase: Optional[Any]=32 , _lowerCAmelCase: Union[str, Any]=5 , _lowerCAmelCase: str=4 , _lowerCAmelCase: str=37 , _lowerCAmelCase: Any="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: List[str]=0.02 , _lowerCAmelCase: Union[str, Any]=0.9 , _lowerCAmelCase: int=None , ): lowercase :Dict = parent lowercase :Optional[int] = batch_size lowercase :List[Any] = image_size lowercase :int = num_channels lowercase :Any = patch_size lowercase :str = tubelet_size lowercase :Optional[Any] = num_frames lowercase :Optional[Any] = is_training lowercase :Tuple = use_labels lowercase :Union[str, Any] = hidden_size lowercase :Any = num_hidden_layers lowercase :Optional[Any] = num_attention_heads lowercase :Optional[int] = intermediate_size lowercase :Union[str, Any] = hidden_act lowercase :int = hidden_dropout_prob lowercase :List[str] = attention_probs_dropout_prob lowercase :List[str] = type_sequence_label_size lowercase :Union[str, Any] = initializer_range lowercase :Optional[Any] = mask_ratio lowercase :List[Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase :List[str] = (image_size // patch_size) ** 2 lowercase :Dict = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase :Optional[Any] = int(mask_ratio * self.seq_length ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase :Tuple = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase :Dict = None if self.use_labels: lowercase :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase :Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self: List[str] ): return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict ): lowercase :List[Any] = VideoMAEModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase :int = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: int , _lowerCAmelCase: str , _lowerCAmelCase: Optional[int] ): lowercase :str = VideoMAEForPreTraining(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase :Tuple = torch.ones((self.num_masks,) ) lowercase :str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase :Optional[int] = mask.expand(self.batch_size , -1 ).bool() lowercase :List[Any] = model(_lowerCAmelCase , _lowerCAmelCase ) # model only returns predictions for masked patches lowercase :Any = mask.sum().item() lowercase :Optional[Any] = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def SCREAMING_SNAKE_CASE ( self: Optional[int] ): lowercase :Union[str, Any] = self.prepare_config_and_inputs() lowercase , lowercase , lowercase :str = config_and_inputs lowercase :Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): _a = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) _a = ( {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification} if is_torch_available() else {} ) _a = False _a = False _a = False _a = False def SCREAMING_SNAKE_CASE ( self: Dict ): lowercase :str = VideoMAEModelTester(self ) lowercase :str = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: str , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any]=False ): lowercase :Union[str, Any] = copy.deepcopy(_lowerCAmelCase ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase :Tuple = torch.ones((self.model_tester.num_masks,) ) lowercase :Optional[Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase :List[Any] = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase :Optional[int] = bool_masked_pos.to(_lowerCAmelCase ) if return_labels: if model_class in [ *get_values(_lowerCAmelCase ), ]: lowercase :List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) return inputs_dict def SCREAMING_SNAKE_CASE ( self: Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="VideoMAE does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self: str ): pass def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): lowercase , lowercase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowercase :Union[str, Any] = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase :List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase , lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase :Tuple = model_class(_lowerCAmelCase ) lowercase :Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase :Optional[int] = [*signature.parameters.keys()] lowercase :List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: List[str] ): lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: int ): lowercase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self: Any ): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase :int = VideoMAEModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: str ): if not self.has_attentions: pass else: lowercase , lowercase :Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase :Optional[Any] = True for model_class in self.all_model_classes: lowercase :Tuple = self.model_tester.seq_length - self.model_tester.num_masks lowercase :Dict = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase :Any = True lowercase :Tuple = False lowercase :str = True lowercase :List[Any] = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase :List[Any] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase :Any = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase :Optional[Any] = True lowercase :str = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase :Any = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase :List[Any] = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase :int = len(_lowerCAmelCase ) # Check attention is always last and order is fine lowercase :int = True lowercase :Union[str, Any] = True lowercase :int = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase :Optional[int] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) self.assertEqual(out_len + 1 , len(_lowerCAmelCase ) ) lowercase :Tuple = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def SCREAMING_SNAKE_CASE ( self: str ): def check_hidden_states_output(_lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Tuple ): lowercase :Dict = 
model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase :Tuple = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase :Tuple = outputs.hidden_states lowercase :Any = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) lowercase :str = self.model_tester.seq_length - self.model_tester.num_masks lowercase :List[str] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase , lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase :Any = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase :Optional[Any] = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): pass def UpperCAmelCase__ ( ): lowercase :str = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) lowercase :List[str] = np.load(lowerCamelCase ) return list(lowerCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self: Dict ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self: Any ): lowercase :Tuple = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to( _lowerCAmelCase ) lowercase :Tuple = self.default_image_processor lowercase :Optional[Any] = prepare_video() lowercase :str = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase :List[str] = model(**_lowerCAmelCase ) # verify the logits lowercase :Optional[int] = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) lowercase :Optional[int] = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self: Tuple ): lowercase :List[str] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(_lowerCAmelCase ) lowercase :List[Any] = self.default_image_processor lowercase :str = prepare_video() lowercase :Optional[int] = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase ) # add boolean mask, indicating which patches to mask lowercase :Optional[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" ) lowercase :str = torch.load(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase :Optional[Any] = model(**_lowerCAmelCase ) # verify the logits lowercase :str = torch.Size([1, 14_08, 15_36] ) lowercase :Union[str, Any] = torch.tensor( [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=_lowerCAmelCase ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase :Union[str, Any] = torch.tensor([0.51_42] , device=_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase :Any = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=_lowerCAmelCase ).to( _lowerCAmelCase ) with torch.no_grad(): lowercase :List[str] = model(**_lowerCAmelCase ) lowercase :Tuple = torch.tensor(torch.tensor([0.64_69] ) , device=_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1e-4 ) )
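# Hedged inference sketch mirroring the video-classification integration test
# above; the checkpoint name comes from that test, and the random clip is a
# stand-in for a real 16-frame video.
import numpy as np
import torch
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

video = list(np.random.randint(0, 256, (16, 224, 224, 3), dtype=np.uint8))  # 16 HWC frames
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])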
158
1
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a : def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=[3_0, 3_0] , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=3_2 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1_0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=8 , _lowerCamelCase=1_0 , ): lowercase = parent lowercase = batch_size lowercase = image_size lowercase = patch_size lowercase = num_channels lowercase = is_training lowercase = use_labels lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = scope lowercase = n_targets lowercase = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowercase = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowercase = num_patches + 1 + self.num_detection_tokens def UpperCamelCase_ ( self ): lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowercase = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowercase = [] for i in range(self.batch_size ): lowercase = {} lowercase = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_lowerCamelCase ) lowercase = torch.rand(self.n_targets , 4 , device=_lowerCamelCase ) labels.append(_lowerCamelCase ) lowercase = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowercase = YolosModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowercase = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, 
self.hidden_size) ) def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowercase = YolosForObjectDetection(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowercase = model(pixel_values=_lowerCamelCase ) lowercase = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowercase = model(pixel_values=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def UpperCamelCase_ ( self ): lowercase = self.prepare_config_and_inputs() lowercase , lowercase , lowercase = config_and_inputs lowercase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a ( a_, a_, unittest.TestCase ): UpperCAmelCase_ : Any =(YolosModel, YolosForObjectDetection) if is_torch_available() else () UpperCAmelCase_ : Dict =( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) UpperCAmelCase_ : List[str] =False UpperCAmelCase_ : str =False UpperCAmelCase_ : Tuple =False UpperCAmelCase_ : int =False def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ): lowercase = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowercase = [] for i in range(self.model_tester.batch_size ): lowercase = {} lowercase = torch.ones( size=(self.model_tester.n_targets,) , device=_lowerCamelCase , dtype=torch.long ) lowercase = torch.ones( self.model_tester.n_targets , 4 , device=_lowerCamelCase , dtype=torch.float ) labels.append(_lowerCamelCase ) lowercase = labels return inputs_dict def UpperCamelCase_ ( self ): lowercase = YolosModelTester(self ) lowercase = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=3_7 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): # YOLOS does not use inputs_embeds pass def UpperCamelCase_ ( self ): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def UpperCamelCase_ ( self ): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCamelCase ) lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase = [*signature.parameters.keys()] lowercase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def UpperCamelCase_ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def UpperCamelCase_ ( self ): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = True # in YOLOS, the seq_len is different lowercase = 
self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowercase = True lowercase = False lowercase = True lowercase = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) lowercase = outputs.attentions self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase = True lowercase = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) lowercase = outputs.attentions self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase = len(_lowerCamelCase ) # Check attention is always last and order is fine lowercase = True lowercase = True lowercase = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) lowercase = 1 self.assertEqual(out_len + added_hidden_states , len(_lowerCamelCase ) ) lowercase = outputs.attentions self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def UpperCamelCase_ ( self ): def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowercase = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) lowercase = outputs.hidden_states lowercase = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) # YOLOS has a different seq_length lowercase = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def UpperCamelCase_ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_lowerCamelCase ) @slow def UpperCamelCase_ ( self ): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = YolosModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self ): return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): lowercase = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(_lowerCamelCase ) lowercase = 
self.default_image_processor lowercase = prepare_img() lowercase = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowercase = model(inputs.pixel_values ) # verify outputs lowercase = torch.Size((1, 1_0_0, 9_2) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) lowercase = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_lowerCamelCase , ) lowercase = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _lowerCamelCase , atol=1e-4 ) ) # verify postprocessing lowercase = image_processor.post_process_object_detection( _lowerCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowercase = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(_lowerCamelCase ) lowercase = [7_5, 7_5, 1_7, 6_3, 1_7] lowercase = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(_lowerCamelCase ) self.assertEqual(len(results['scores'] ) , 5 ) self.assertTrue(torch.allclose(results['scores'] , _lowerCamelCase , atol=1e-4 ) ) self.assertSequenceEqual(results['labels'].tolist() , _lowerCamelCase ) self.assertTrue(torch.allclose(results['boxes'][0, :] , _lowerCamelCase ) )
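# Hedged sketch of the object-detection path the integration test above checks,
# reusing the same 'hustvl/yolos-small' checkpoint, COCO fixture image, and
# post-processing call.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

results = image_processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[int(label)]}: {score:.2f} at {box.tolist()}")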
220
"""simple docstring""" import requests from bsa import BeautifulSoup def _SCREAMING_SNAKE_CASE ( __snake_case : str = "AAPL" ): '''simple docstring''' lowercase = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}' lowercase = BeautifulSoup(requests.get(__snake_case ).text , 'html.parser' ) lowercase = 'My(6px) Pos(r) smartphone_Mt(6px)' return soup.find('div' , class_=class_ ).find('span' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
220
1
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        # Walk from the element up to the document root, recording the tag name
        # and sibling index at every level.
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
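# Sketch of what the feature extractor above produces for raw HTML (requires
# bs4); the example markup and the expected output shown in comments are
# illustrative only.
html_string = "<html><body><h1>Hello world</h1><p>Welcome to <b>my</b> page.</p></body></html>"

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Hello world', 'Welcome to', 'my', 'page.']]
print(encoding["xpaths"])  # one matching xpath per node, e.g. '/html/body/h1'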
177
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowerCamelCase = logging.get_logger(__name__) class a ( _A ): '''simple docstring''' lowerCAmelCase : List[Any] = ['pixel_values'] def __init__( self : Any , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__snake_case : Optional[Any] , ): super().__init__(**__snake_case ) UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 2_24} UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case ) UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} UpperCAmelCase_ = get_size_dict(__snake_case , param_name='''crop_size''' ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = resample UpperCAmelCase_ = do_center_crop UpperCAmelCase_ = crop_size UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCamelCase_ ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ): UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: UpperCAmelCase_ = int((2_56 / 2_24) * size['''shortest_edge'''] ) UpperCAmelCase_ = get_resize_output_image_size(__snake_case , size=__snake_case , default_to_square=__snake_case ) UpperCAmelCase_ = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' ) return resize( __snake_case , size=(size_dict['''height'''], size_dict['''width''']) , resample=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase_ ( self : Tuple , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ): UpperCAmelCase_ = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(F'Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}' ) return center_crop(__snake_case , size=(size['''height'''], size['''width''']) , data_format=__snake_case , **__snake_case ) def lowerCamelCase_ ( self : Optional[Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ): return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase_ ( self : int , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Union[str, Any] , ): return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase_ ( self : int , __snake_case : ImageInput , __snake_case : Optional[bool] = None , __snake_case : Optional[Dict[str, int]] = None , __snake_case : PILImageResampling = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Dict[str, int]] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[float] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[float, Iterable[float]]] = None , __snake_case : Optional[Union[float, Iterable[float]]] = None , __snake_case : Optional[TensorType] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[str] , ): UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case ) UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ = get_size_dict(__snake_case , param_name='''crop_size''' ) UpperCAmelCase_ = make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ = [to_numpy_array(__snake_case ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(__snake_case , __snake_case , __snake_case ) for image in images] if do_center_crop: UpperCAmelCase_ = [self.center_crop(__snake_case , __snake_case ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(__snake_case , __snake_case ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(__snake_case , __snake_case , __snake_case ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images] UpperCAmelCase_ = {'''pixel_values''': images} return BatchFeature(data=__snake_case , tensor_type=__snake_case )
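# Hedged usage sketch for the image processor above. The concrete class name is
# not recoverable from this row, so `ImageProcessor` below is a hypothetical
# stand-in (the (256 / 224) resize rule suggests a LeViT/DeiT-style processor).
import numpy as np

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # dummy HWC image
image_processor = ImageProcessor()  # hypothetical name for the class defined above
batch = image_processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop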
177
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __A : def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[int]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Dict=0 , ): lowerCAmelCase : Any = parent lowerCAmelCase : int = batch_size lowerCAmelCase : Optional[int] = seq_length lowerCAmelCase : Optional[Any] = is_training lowerCAmelCase : Optional[Any] = use_input_mask lowerCAmelCase : Union[str, Any] = use_token_type_ids lowerCAmelCase : Any = use_labels lowerCAmelCase : Dict = vocab_size lowerCAmelCase : str = hidden_size lowerCAmelCase : str = num_hidden_layers lowerCAmelCase : int = num_attention_heads lowerCAmelCase : List[Any] = intermediate_size lowerCAmelCase : str = hidden_act lowerCAmelCase : Optional[Any] = hidden_dropout_prob lowerCAmelCase : Any = attention_probs_dropout_prob lowerCAmelCase : str = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : str = type_sequence_label_size lowerCAmelCase : int = initializer_range lowerCAmelCase : Dict = num_labels lowerCAmelCase : List[Any] = num_choices lowerCAmelCase : Optional[Any] = scope lowerCAmelCase : Optional[Any] = projection_dim def lowercase__ ( self : Any ): lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : List[Any] = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : Any = None if self.use_token_type_ids: lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase : Tuple = None lowerCAmelCase : Optional[Any] = None lowerCAmelCase : str = None if self.use_labels: lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Dict = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) lowerCAmelCase : Tuple = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str ): lowerCAmelCase : int = TFDPRContextEncoder(config=UpperCAmelCase_ ) lowerCAmelCase : Any = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) lowerCAmelCase : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) lowerCAmelCase : Tuple = model(UpperCAmelCase_ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] ): lowerCAmelCase : List[str] = TFDPRQuestionEncoder(config=UpperCAmelCase_ ) lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) lowerCAmelCase : int = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) lowerCAmelCase : Any = model(UpperCAmelCase_ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def lowercase__ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ): lowerCAmelCase : Optional[int] = TFDPRReader(config=UpperCAmelCase_ ) lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def lowercase__ ( self : Any ): lowerCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ) : List[Any] = config_and_inputs lowerCAmelCase : str = {'input_ids': input_ids} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): lowerCAmelCase_ : int = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowerCAmelCase_ : str = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {} lowerCAmelCase_ : Optional[int] = False lowerCAmelCase_ : Union[str, Any] = False lowerCAmelCase_ : str = False lowerCAmelCase_ : List[Any] = False lowerCAmelCase_ : Dict = False def lowercase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = TFDPRModelTester(self ) lowerCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def lowercase__ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def lowercase__ ( self : Any ): lowerCAmelCase : 
Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase_ ) def lowercase__ ( self : Any ): lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase_ ) def lowercase__ ( self : Dict ): lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase_ ) @slow def lowercase__ ( self : List[Any] ): for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : int = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : str = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Any = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Optional[int] = TFDPRReader.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) @require_tf class __A ( unittest.TestCase ): @slow def lowercase__ ( self : Any ): lowerCAmelCase : List[str] = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' ) lowerCAmelCase : List[Any] = tf.constant( [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP] lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0] # embedding shape = (1, 768) # compare the actual values for a slice. lowerCAmelCase : List[Any] = tf.constant( [ [ 0.03_23_62_53, 0.12_75_33_35, 0.16_81_85_09, 0.00_27_97_86, 0.3_89_69_33, 0.24_26_49_45, 0.2_17_89_71, -0.02_33_52_27, -0.08_48_19_59, -0.14_32_41_17, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
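# Hedged sketch of the embedding check in the integration test above: encode a
# question with the same DPR checkpoint and inspect the 768-d pooled output.
import tensorflow as tf
from transformers import TFDPRQuestionEncoder

model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
input_ids = tf.constant([[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])  # "[CLS] hello, is my dog cute? [SEP]"
embedding = model(input_ids)[0]
print(embedding.shape)  # (1, 768)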
138
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    # Square 2.0 repeatedly until the starting point is at least as large as a.
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    """Square root of a approximated with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        # One Newton-Raphson step for f(x) = x^2 - a.
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
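# Quick sanity check (illustrative) of the Newton iteration above against
# math.sqrt for a few positive inputs.
import math

for a in (2.0, 9.0, 10**6):
    approx = square_root_iterative(a)
    assert math.isclose(approx, math.sqrt(a), rel_tol=1e-9), (a, approx)
print("square_root_iterative agrees with math.sqrt on the sample inputs")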
138
1
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
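# Hedged end-to-end sketch for the processor above: pair an image with a text
# prompt the way ViLT's VQA checkpoints expect. The checkpoint name and image
# URL follow the public ViLT VQA demo and are used here illustratively.
import requests
import torch
from PIL import Image
from transformers import ViltForQuestionAnswering, ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(image, "How many cats are there?", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])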
251
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer lowerCAmelCase : int = 'bart' lowerCAmelCase : Union[str, Any] = True @st.cache(allow_output_mutation=A) def A_( ): if LOAD_DENSE_INDEX: UpperCamelCase = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased') UpperCamelCase = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0') UpperCamelCase = qar_model.eval() else: UpperCamelCase , UpperCamelCase = (None, None) if MODEL_TYPE == "bart": UpperCamelCase = AutoTokenizer.from_pretrained('yjernite/bart_eli5') UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0') UpperCamelCase = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth') sas_model.load_state_dict(save_dict['model']) UpperCamelCase = sas_model.eval() else: UpperCamelCase , UpperCamelCase = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0') return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=A) def A_( ): if LOAD_DENSE_INDEX: UpperCamelCase = faiss.StandardGpuResources() UpperCamelCase = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0')['train'] UpperCamelCase = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) UpperCamelCase = faiss.IndexFlatIP(128) UpperCamelCase = faiss.index_cpu_to_gpu(A , 1 , A) wikiaab_gpu_index_flat.add(A) # TODO fix for larger GPU else: UpperCamelCase , UpperCamelCase = (None, None) UpperCamelCase = Elasticsearch([{'host': 'localhost', 'port': '9200'}]) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=A) def A_( ): UpperCamelCase = datasets.load_dataset('eli5' , name='LFQA_reddit') UpperCamelCase = elia['train_eli5'] UpperCamelCase = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128)) UpperCamelCase = faiss.IndexFlatIP(128) eli5_train_q_index.add(A) return (elia_train, eli5_train_q_index) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = load_indexes() lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = load_models() lowerCAmelCase , lowerCAmelCase : Tuple = load_train_data() def A_( A : Dict , A : str=10): UpperCamelCase = embed_questions_for_retrieval([question] , A , A) UpperCamelCase , UpperCamelCase = eli5_train_q_index.search(A , A) UpperCamelCase = [elia_train[int(A)] for i in I[0]] return nn_examples def A_( A : str , A : Optional[int]="wiki40b" , A : List[Any]="dense" , A : Dict=10): if source == "none": UpperCamelCase , UpperCamelCase = (' <P> '.join(['' for _ in range(11)]).strip(), []) else: if method == "dense": UpperCamelCase , UpperCamelCase = query_qa_dense_index( A , A , A , A , A , A) else: UpperCamelCase , UpperCamelCase = query_es_index( A , A , index_name='english_wiki40b_snippets_100w' , n_results=A , ) UpperCamelCase = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] UpperCamelCase = 'question: {} context: {}'.format(A , A) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda A: 
None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda A: None), }) def A_( A : Optional[Any] , A : List[str] , A : str , A : List[Any]=64 , A : List[Any]=256 , A : int=False , A : List[str]=2 , A : Any=0.95 , A : int=0.8): with torch.no_grad(): UpperCamelCase = qa_sas_generate( A , A , A , num_answers=1 , num_beams=A , min_len=A , max_len=A , do_sample=A , temp=A , top_p=A , top_k=A , max_input_length=1024 , device='cuda:0' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar lowerCAmelCase : List[str] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' lowerCAmelCase : Dict = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia lowerCAmelCase : List[Any] = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) lowerCAmelCase : Optional[int] = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] lowerCAmelCase : List[str] = st.sidebar.checkbox('Demo options') if demo_options: lowerCAmelCase : Dict = st.sidebar.selectbox( '', action_list, index=3, ) lowerCAmelCase : int = action_list.index(action_st) lowerCAmelCase : Optional[Any] = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) lowerCAmelCase : Any = show_type == 'Show full text of passages' else: lowerCAmelCase : Optional[Any] = 3 lowerCAmelCase : Optional[Any] = True lowerCAmelCase : Optional[Any] = st.sidebar.checkbox('Retrieval options') if retrieval_options: lowerCAmelCase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) lowerCAmelCase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) lowerCAmelCase : List[str] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: lowerCAmelCase : str = 'wiki40b' lowerCAmelCase : Tuple = 'dense' lowerCAmelCase : Union[str, Any] = 'beam' lowerCAmelCase : Optional[int] = 2 lowerCAmelCase : Any = 64 lowerCAmelCase : int = 2_56 lowerCAmelCase : Optional[Any] = None lowerCAmelCase : List[str] = None lowerCAmelCase : List[str] = st.sidebar.checkbox('Generation options') if generate_options: lowerCAmelCase : str = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the 
ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) lowerCAmelCase : str = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) lowerCAmelCase : Any = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) lowerCAmelCase : Any = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": lowerCAmelCase : Union[str, Any] = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: lowerCAmelCase : Tuple = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) lowerCAmelCase : List[Any] = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) lowerCAmelCase : Tuple = None # start main text lowerCAmelCase : Optional[Any] = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] lowerCAmelCase : int = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": lowerCAmelCase : List[str] = st.text_input('Enter your question here:', '') else: lowerCAmelCase : Tuple = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": lowerCAmelCase , lowerCAmelCase : Any = make_support(question, source=wiki_source, method='dense', n_results=10) lowerCAmelCase , lowerCAmelCase : Dict = make_support(question, source=wiki_source, method='sparse', n_results=10) lowerCAmelCase : List[str] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] lowerCAmelCase : Optional[Any] = support_list[:10] lowerCAmelCase : Dict = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: lowerCAmelCase , lowerCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: lowerCAmelCase , lowerCAmelCase : Union[str, Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): lowerCAmelCase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) lowerCAmelCase : Optional[int] = res[1].strip() if sec_titles == "": lowerCAmelCase : Dict = '[{}]({})'.format(res[0], wiki_url) else: lowerCAmelCase : Dict = sec_titles.split(' & ') lowerCAmelCase : str = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: lowerCAmelCase : Optional[Any] = find_nearest_training(question) lowerCAmelCase : Optional[int] = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) lowerCAmelCase : List[str] = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) lowerCAmelCase : int = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
251
1
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Optional[int] = (PNDMScheduler,) __UpperCAmelCase : Union[str, Any] = (('''num_inference_steps''', 50),) def lowerCamelCase ( self , **UpperCamelCase__ ) -> List[Any]: '''simple docstring''' snake_case : List[Any] = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**UpperCamelCase__ ) return config def lowerCamelCase ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> List[Any]: '''simple docstring''' snake_case : int = dict(self.forward_default_kwargs ) snake_case : Optional[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase__ ) snake_case : Optional[int] = self.dummy_sample snake_case : Dict = 0.1 * sample snake_case : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: snake_case : Any = self.get_scheduler_config(**UpperCamelCase__ ) snake_case : List[Any] = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals snake_case : Union[str, Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase__ ) snake_case : Optional[int] = scheduler_class.from_pretrained(UpperCamelCase__ ) new_scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals snake_case : List[str] = dummy_past_residuals[:] snake_case : Tuple = scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample snake_case : Any = new_scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" snake_case : Any = scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample snake_case : Optional[int] = new_scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self ) -> int: '''simple docstring''' pass def lowerCamelCase ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> Any: '''simple docstring''' snake_case : int = dict(self.forward_default_kwargs ) snake_case : Any = kwargs.pop("num_inference_steps" , UpperCamelCase__ ) snake_case : str = self.dummy_sample snake_case : Tuple = 0.1 * sample snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: snake_case : Optional[int] = self.get_scheduler_config() snake_case : Optional[Any] = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals (must be after setting timesteps) snake_case : Optional[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase__ ) snake_case : List[Any] = scheduler_class.from_pretrained(UpperCamelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residual (must be after setting timesteps) snake_case : str = dummy_past_residuals[:] snake_case : Optional[Any] = scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 
**UpperCamelCase__ ).prev_sample snake_case : Union[str, Any] = new_scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" snake_case : Optional[int] = scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample snake_case : List[str] = new_scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self , **UpperCamelCase__ ) -> str: '''simple docstring''' snake_case : Optional[int] = self.scheduler_classes[0] snake_case : Optional[int] = self.get_scheduler_config(**UpperCamelCase__ ) snake_case : Dict = scheduler_class(**UpperCamelCase__ ) snake_case : List[Any] = 10 snake_case : Optional[int] = self.dummy_model() snake_case : Any = self.dummy_sample_deter scheduler.set_timesteps(UpperCamelCase__ ) for i, t in enumerate(scheduler.prk_timesteps ): snake_case : str = model(UpperCamelCase__ , UpperCamelCase__ ) snake_case : List[Any] = scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): snake_case : Optional[Any] = model(UpperCamelCase__ , UpperCamelCase__ ) snake_case : Dict = scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample return sample def lowerCamelCase ( self ) -> str: '''simple docstring''' snake_case : str = dict(self.forward_default_kwargs ) snake_case : Tuple = kwargs.pop("num_inference_steps" , UpperCamelCase__ ) for scheduler_class in self.scheduler_classes: snake_case : List[str] = self.get_scheduler_config() snake_case : Union[str, Any] = scheduler_class(**UpperCamelCase__ ) snake_case : Tuple = self.dummy_sample snake_case : str = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCamelCase__ , "set_timesteps" ): scheduler.set_timesteps(UpperCamelCase__ ) elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , "set_timesteps" ): snake_case : Optional[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) snake_case : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] snake_case : List[Any] = dummy_past_residuals[:] snake_case : str = scheduler.step_prk(UpperCamelCase__ , 0 , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample snake_case : Union[str, Any] = scheduler.step_prk(UpperCamelCase__ , 1 , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) snake_case : Tuple = scheduler.step_plms(UpperCamelCase__ , 0 , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample snake_case : Any = scheduler.step_plms(UpperCamelCase__ , 1 , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=UpperCamelCase__ ) snake_case : Any = self.scheduler_classes[0] snake_case : List[Any] = self.get_scheduler_config(steps_offset=1 ) snake_case : 
List[Any] = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase__ ) def lowerCamelCase ( self ) -> str: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=UpperCamelCase__ ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCamelCase__ ) def lowerCamelCase ( self ) -> Tuple: '''simple docstring''' snake_case : Union[str, Any] = 27 for scheduler_class in self.scheduler_classes: snake_case : int = self.dummy_sample snake_case : str = 0.1 * sample snake_case : Optional[Any] = self.get_scheduler_config() snake_case : List[Any] = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(UpperCamelCase__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): snake_case : List[Any] = scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample def lowerCamelCase ( self ) -> int: '''simple docstring''' with self.assertRaises(UpperCamelCase__ ): snake_case : Any = self.scheduler_classes[0] snake_case : Tuple = self.get_scheduler_config() snake_case : Optional[int] = scheduler_class(**UpperCamelCase__ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def lowerCamelCase ( self ) -> Tuple: '''simple docstring''' snake_case : Union[str, Any] = self.full_loop() snake_case : int = torch.sum(torch.abs(UpperCamelCase__ ) ) snake_case : str = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' snake_case : List[str] = self.full_loop(prediction_type="v_prediction" ) snake_case : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase__ ) ) snake_case : Tuple = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def lowerCamelCase ( self ) -> List[Any]: '''simple docstring''' snake_case : str = self.full_loop(set_alpha_to_one=UpperCamelCase__ , beta_start=0.01 ) snake_case : List[str] = torch.sum(torch.abs(UpperCamelCase__ ) ) snake_case : Any = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' snake_case : Optional[int] = self.full_loop(set_alpha_to_one=UpperCamelCase__ , beta_start=0.01 ) snake_case : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase__ ) ) snake_case : Optional[int] = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
203
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging __snake_case = logging.get_logger(__name__) def __lowerCAmelCase ( ) -> str: """simple docstring""" snake_case : Dict = os.getenv("SM_HP_MP_PARAMETERS" , "{}" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. snake_case : Optional[int] = json.loads(lowercase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. snake_case : Optional[int] = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". snake_case : Any = json.loads(lowercase ) if not mpi_options.get("sagemaker_mpi_enabled" , lowercase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : str = field( default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , ) def lowerCamelCase ( self ) -> List[Any]: '''simple docstring''' super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead." , UpperCamelCase__ , ) @cached_property def lowerCamelCase ( self ) -> "torch.device": '''simple docstring''' logger.info("PyTorch: setting up devices" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if self.no_cuda: snake_case : Optional[Any] = torch.device("cpu" ) snake_case : List[Any] = 0 elif is_sagemaker_model_parallel_available(): snake_case : Tuple = smp.local_rank() snake_case : int = torch.device("cuda" , UpperCamelCase__ ) snake_case : Dict = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta ) snake_case : Any = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) ) snake_case : Optional[Any] = torch.device("cuda" , self.local_rank ) snake_case : str = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 snake_case : List[str] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. snake_case : Optional[Any] = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta ) snake_case : Any = torch.device("cuda" , self.local_rank ) snake_case : Dict = 1 if device.type == "cuda": torch.cuda.set_device(UpperCamelCase__ ) return device @property def lowerCamelCase ( self ) -> Optional[int]: '''simple docstring''' if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' return not is_sagemaker_model_parallel_available() @property def lowerCamelCase ( self ) -> str: '''simple docstring''' return False
203
1
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first traversal; returns the set of explored nodes."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
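A quick sanity check for the traversal above (a hypothetical snippet, meant to run in the same module as the code it exercises):

# The example graph G is connected, so DFS from "A" must reach every node.
explored = depth_first_search(G, "A")
assert explored == {"A", "B", "C", "D", "E", "F", "G"}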
118
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    """Hash table whose slots hold deques, so colliding values can be chained."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Lazily create the deque for this slot, then prepend the new value.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
118
1
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
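One hand-checkable case for the digit-power helper above: 4150 is a known member of the sequence, since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.

assert digits_fifth_powers_sum(4150) == 4150
assert digits_fifth_powers_sum(1000) == 1  # 1 + 0 + 0 + 0, so 1000 is excluded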
193
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
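A hedged usage sketch for the tool above. It assumes the class is wired into the `transformers` agents registry under the "text-classification" task name; treat that task string as an assumption, not a confirmed API detail.

# Illustrative only: `load_tool` exists in recent transformers releases, but the
# task name below is assumed rather than confirmed by this snippet.
from transformers import load_tool

classifier = load_tool("text-classification")
print(classifier("This movie was really fun to watch.", labels=["positive", "negative"]))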
43
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __lowerCAmelCase : @staticmethod def UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): lowercase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' ) __UpperCamelCase = [ { 'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'question': 'How many cats are there?', }, { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'question': 'How many cats are there?', }, ] return vqa_pipeline, examples def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = vqa_pipeline(__UpperCAmelCase , top_k=1 ) self.assertEqual( __UpperCAmelCase , [ [{'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase )}], [{'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase )}], ] , ) @require_torch def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' ) __UpperCamelCase = './tests/fixtures/tests_samples/COCO/000000039769.png' __UpperCamelCase = 'How many cats are there?' __UpperCamelCase = vqa_pipeline(image=__UpperCAmelCase , question='How many cats are there?' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [{'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase )}, {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase )}] ) __UpperCamelCase = vqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( __UpperCAmelCase , [{'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase )}, {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase )}] ) @slow @require_torch def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' ) __UpperCamelCase = './tests/fixtures/tests_samples/COCO/000000039769.png' __UpperCamelCase = 'How many cats are there?' __UpperCamelCase = vqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}] ) __UpperCamelCase = vqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}] ) __UpperCamelCase = vqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [[{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}]] * 2 , ) @require_tf @unittest.skip('Visual question answering not implemented in TF' ) def UpperCAmelCase ( self ): '''simple docstring''' pass
263
"""simple docstring""" import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = field __UpperCamelCase = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths} __UpperCamelCase = Json( cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , field=__UpperCAmelCase , **__UpperCAmelCase , ) def UpperCAmelCase ( self ): '''simple docstring''' if self.streaming: __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , ) __UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F'num_proc {num_proc} must be an integer > 0.' ) __UpperCamelCase = dataset __UpperCamelCase = path_or_buf __UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __UpperCamelCase = num_proc __UpperCamelCase = 'utf-8' __UpperCamelCase = to_json_kwargs def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.to_json_kwargs.pop('path_or_buf' , __UpperCAmelCase ) __UpperCamelCase = self.to_json_kwargs.pop('orient' , 'records' ) __UpperCamelCase = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False ) __UpperCamelCase = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True ) __UpperCamelCase = self.to_json_kwargs.pop('compression' , __UpperCAmelCase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'`datasets` currently does not support {compression} compression' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , 'wb' , compression=__UpperCAmelCase ) as buffer: __UpperCamelCase = self._write(file_obj=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F'The compression parameter is not supported when writing to a buffer, but compression={compression}' ' was passed. Please provide a local path instead.' 
) __UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs ) return written def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args __UpperCamelCase = query_table( table=self.dataset.data , key=slice(__UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , ) __UpperCamelCase = batch.to_pandas().to_json( path_or_buf=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **__UpperCAmelCase ) if not json_str.endswith('\n' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ): __UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(__UpperCAmelCase ) else: __UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __UpperCAmelCase , __UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ): written += file_obj.write(__UpperCAmelCase ) return written
263
1
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __snake_case = True except ImportError: __snake_case = False __snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class __snake_case ( lowerCamelCase__ ): @staticmethod def UpperCAmelCase__ ( snake_case__ ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : str =parser.add_parser('''add-new-model''' ) add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' ) add_new_model_parser.add_argument('''--testing_file''' , type=snake_case__ , help='''Configuration file on which to run.''' ) add_new_model_parser.add_argument( '''--path''' , type=snake_case__ , help='''Path to cookiecutter. Should only be used for testing purposes.''' ) add_new_model_parser.set_defaults(func=snake_case__ ) def __init__( self , snake_case__ , snake_case__ , snake_case__=None , *snake_case__ ) -> Dict: '''simple docstring''' UpperCAmelCase : Dict =testing UpperCAmelCase : List[Any] =testing_file UpperCAmelCase : str =path def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' warnings.warn( '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ''' '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ''' '''checks, you should use `transformers-cli add-new-model-like` instead.''' ) if not _has_cookiecutter: raise ImportError( '''Model creation dependencies are required to use the `add_new_model` command. Install them by running ''' '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory UpperCAmelCase : Dict =[directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]] if len(snake_case__ ) > 0: raise ValueError( '''Several directories starting with `cookiecutter-template-` in current working directory. 
''' '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or ''' '''change your working directory.''' ) UpperCAmelCase : Tuple =( Path(snake_case__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) UpperCAmelCase : Tuple =path_to_transformer_root / '''templates''' / '''adding_a_new_model''' # Execute cookiecutter if not self._testing: cookiecutter(str(snake_case__ ) ) else: with open(self._testing_file , '''r''' ) as configuration_file: UpperCAmelCase : Tuple =json.load(snake_case__ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=snake_case__ , extra_context=snake_case__ , ) UpperCAmelCase : List[Any] =[directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0] # Retrieve configuration with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file: UpperCAmelCase : Any =json.load(snake_case__ ) UpperCAmelCase : Tuple =configuration['''lowercase_modelname'''] UpperCAmelCase : Optional[int] =configuration['''generate_tensorflow_pytorch_and_flax'''] os.remove(f'''{directory}/configuration.json''' ) UpperCAmelCase : Any ='''PyTorch''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase : str ='''TensorFlow''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase : Optional[Any] ='''Flax''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase : str =f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}''' os.makedirs(snake_case__ , exist_ok=snake_case__ ) os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=snake_case__ ) # Tests require submodules as they have parent imports with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , '''w''' ): pass shutil.move( f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , ) shutil.move( f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , ) def remove_copy_lines(snake_case__ ): with open(snake_case__ , '''r''' ) as f: UpperCAmelCase : Tuple =f.readlines() with open(snake_case__ , '''w''' ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(snake_case__ ) if output_pytorch: if not self._testing: remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' ) shutil.move( f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , ) shutil.move( f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , ) else: os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' ) os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' ) if output_tensorflow: if not self._testing: remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) shutil.move( f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , ) shutil.move( f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , ) else: os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' ) if output_flax: if not self._testing: remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) shutil.move( f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , ) shutil.move( f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , ) else: os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' ) shutil.move( f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , ) shutil.move( f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , ) shutil.move( f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(snake_case__ , snake_case__ , snake_case__ ): # Create temp file UpperCAmelCase , UpperCAmelCase : Optional[Any] =mkstemp() UpperCAmelCase : Dict =False with fdopen(snake_case__ , '''w''' ) as new_file: with open(snake_case__ ) as old_file: for line in old_file: new_file.write(snake_case__ ) if line_to_copy_below in line: UpperCAmelCase : List[str] =True for line_to_copy in lines_to_copy: new_file.write(snake_case__ ) if not line_found: raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' ) # Copy the file permissions from the old file to the new file copymode(snake_case__ , snake_case__ ) # Remove original file remove(snake_case__ ) # Move new file move(snake_case__ , snake_case__ ) def skip_units(snake_case__ ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(snake_case__ ): with open(snake_case__ ) as datafile: UpperCAmelCase : List[Any] =[] UpperCAmelCase : List[Any] =False UpperCAmelCase : List[str] =False for line in datafile: if "# To replace in: " in line and "##" not in line: UpperCAmelCase : Union[str, Any] 
=line.split('''"''' )[1] UpperCAmelCase : Any =skip_units(snake_case__ ) elif "# Below: " in line and "##" not in line: UpperCAmelCase : List[Any] =line.split('''"''' )[1] UpperCAmelCase : Optional[int] =skip_units(snake_case__ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[Any] =[] elif "# Replace with" in line and "##" not in line: UpperCAmelCase : List[str] =[] elif "##" not in line: lines_to_copy.append(snake_case__ ) remove(snake_case__ ) replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' ) os.rmdir(snake_case__ )
348
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
348
1
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Union[str, Any]=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]="None" , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Union[str, Any]=None , ) ->Dict: '''simple docstring''' lowerCamelCase__: int =parent lowerCamelCase__: Union[str, Any] =batch_size lowerCamelCase__: List[Any] =seq_length lowerCamelCase__: List[Any] =is_training lowerCamelCase__: List[str] =use_input_mask lowerCamelCase__: str =use_token_type_ids lowerCamelCase__: List[str] =use_labels lowerCamelCase__: Dict =vocab_size lowerCamelCase__: Tuple =hidden_size lowerCamelCase__: str =num_hidden_layers lowerCamelCase__: List[str] =num_attention_heads lowerCamelCase__: Any =intermediate_size lowerCamelCase__: Dict =hidden_act lowerCamelCase__: int =hidden_dropout_prob lowerCamelCase__: Tuple =attention_probs_dropout_prob lowerCamelCase__: Union[str, Any] =max_position_embeddings lowerCamelCase__: List[str] =type_vocab_size lowerCamelCase__: List[Any] =type_sequence_label_size lowerCamelCase__: List[str] =initializer_range lowerCamelCase__: Optional[int] =num_labels lowerCamelCase__: List[str] =num_choices lowerCamelCase__: Optional[Any] =relative_attention lowerCamelCase__: Tuple =position_biased_input lowerCamelCase__: Optional[int] =pos_att_type lowerCamelCase__: int =scope def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str: '''simple docstring''' lowerCamelCase__: str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowerCamelCase__: Any =None if self.use_input_mask: lowerCamelCase__: Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) lowerCamelCase__: Dict =None if self.use_token_type_ids: lowerCamelCase__: Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowerCamelCase__: Any =None lowerCamelCase__: Union[str, Any] =None lowerCamelCase__: str =None if self.use_labels: lowerCamelCase__: Any =ids_tensor([self.batch_size] , self.type_sequence_label_size) lowerCamelCase__: Any =ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels) lowerCamelCase__: Tuple =ids_tensor([self.batch_size] , self.num_choices) lowerCamelCase__: int =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Dict: '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]) ->Dict: '''simple docstring''' self.parent.assertListEqual(list(result.loss.size()) , []) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Optional[int] =DebertaVaModel(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() lowerCamelCase__: List[str] =model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)[0] lowerCamelCase__: int =model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)[0] lowerCamelCase__: List[str] =model(UpperCAmelCase_)[0] self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size]) def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =DebertaVaForMaskedLM(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() lowerCamelCase__: Optional[Any] =model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int) ->Tuple: '''simple docstring''' lowerCamelCase__: Tuple =self.num_labels lowerCamelCase__: Optional[Any] =DebertaVaForSequenceClassification(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() lowerCamelCase__: Any =model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels]) self.check_loss_output(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] =self.num_labels 
lowerCamelCase__: str =DebertaVaForTokenClassification(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() lowerCamelCase__: List[str] =model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =DebertaVaForQuestionAnswering(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() lowerCamelCase__: Any =model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[Any] =DebertaVaForMultipleChoice(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() lowerCamelCase__: List[str] =input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowerCamelCase__: List[Any] =token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowerCamelCase__: Union[str, Any] =input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowerCamelCase__: Union[str, Any] =model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple: '''simple docstring''' lowerCamelCase__: str =self.prepare_config_and_inputs() ( lowerCamelCase__ ): int =config_and_inputs lowerCamelCase__: List[str] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowercase_ = ( { "feature-extraction": DebertaVaModel, "fill-mask": DebertaVaForMaskedLM, "question-answering": DebertaVaForQuestionAnswering, "text-classification": DebertaVaForSequenceClassification, "token-classification": DebertaVaForTokenClassification, "zero-shot": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = True lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Optional[int] =DebertaVaModelTester(self) lowerCamelCase__: Optional[int] =ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37) def SCREAMING_SNAKE_CASE_ (self : int) ->Dict: '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->Dict: '''simple docstring''' lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]: '''simple docstring''' lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->str: '''simple docstring''' lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*UpperCAmelCase_) @slow def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->int: '''simple docstring''' for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__: Dict =DebertaVaModel.from_pretrained(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) @require_torch @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason="Model not available yet") def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]: '''simple docstring''' pass @slow def SCREAMING_SNAKE_CASE_ (self : Any) ->str: '''simple docstring''' lowerCamelCase__: Any =DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge") lowerCamelCase__: str =torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]]) lowerCamelCase__: Dict =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)[0] # compare the actual values for a slice. lowerCamelCase__: List[Any] =torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4) , F"""{output[:, 1:4, 1:4]}""")
369
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
273
0
from __future__ import annotations


def lowerCamelCase__ ( snake_case_ : list[int] , snake_case_ : int ) -> list[list[int]]:
    __snake_case = []
    __snake_case = []
    __snake_case = 0
    __snake_case = sum(snake_case_ )
    create_state_space_tree(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
    return result


def lowerCamelCase__ ( snake_case_ : list[int] , snake_case_ : int , snake_case_ : int , snake_case_ : list[int] , snake_case_ : list[list[int]] , snake_case_ : int , ) -> None:
    if sum(snake_case_ ) > max_sum or (remaining_nums_sum + sum(snake_case_ )) < max_sum:
        return
    if sum(snake_case_ ) == max_sum:
        result.append(snake_case_ )
        return
    for index in range(snake_case_ , len(snake_case_ ) ):
        create_state_space_tree(
            snake_case_ , snake_case_ , index + 1 , [*path, nums[index]] , snake_case_ , remaining_nums_sum - nums[index] , )


snake_case_ = [3, 34, 4, 12, 5, 2]
snake_case_ = 9
snake_case_ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
24
'''simple docstring'''
from __future__ import annotations


def _UpperCamelCase ( __A ) -> float:
    '''simple docstring'''
    UpperCamelCase__ = 0.00
    UpperCamelCase__ = 0
    for resistor in resistors:
        if resistor <= 0:
            UpperCamelCase__ = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(__A )
        first_sum += 1 / float(__A )
        index += 1
    return 1 / first_sum


def _UpperCamelCase ( __A ) -> float:
    '''simple docstring'''
    UpperCamelCase__ = 0.00
    UpperCamelCase__ = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            UpperCamelCase__ = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(__A )
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
80
0
UpperCAmelCase_ : Optional[Any] = tuple[float, float, float]
UpperCAmelCase_ : Union[str, Any] = tuple[float, float, float]


def UpperCamelCase ( _A : int , _A : List[str] )-> Vectorad:
    """simple docstring"""
    A__ = end_pointa[0] - end_pointa[0]
    A__ = end_pointa[1] - end_pointa[1]
    A__ = end_pointa[2] - end_pointa[2]
    return (x, y, z)


def UpperCamelCase ( _A : Tuple , _A : List[str] )-> Vectorad:
    """simple docstring"""
    A__ = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    A__ = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def UpperCamelCase ( _A : str , _A : int )-> bool:
    """simple docstring"""
    return tuple(round(a__ , a__ ) for x in vector ) == (0, 0, 0)


def UpperCamelCase ( _A : Optional[int] , _A : Dict , _A : Union[str, Any] , _A : Optional[int] = 10 )-> bool:
    """simple docstring"""
    A__ = create_vector(a__ , a__ )
    A__ = create_vector(a__ , a__ )
    return is_zero_vector(get_ad_vectors_cross(a__ , a__ ) , a__ )
361
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase_ : Dict = logging.get_logger(__name__)

UpperCAmelCase_ : List[Any] = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class UpperCamelCase ( _UpperCAmelCase ):
    lowerCAmelCase : int = """gpt_neox"""

    def __init__( self , UpperCAmelCase__=50_432 , UpperCAmelCase__=6_144 , UpperCAmelCase__=44 , UpperCAmelCase__=64 , UpperCAmelCase__=24_576 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.25 , UpperCAmelCase__=10_000 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.1 , UpperCAmelCase__=2_048 , UpperCAmelCase__=0.02 , UpperCAmelCase__=1e-5 , UpperCAmelCase__=True , UpperCAmelCase__=0 , UpperCAmelCase__=2 , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ):
        super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
        A__ = vocab_size
        A__ = max_position_embeddings
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = rotary_pct
        A__ = rotary_emb_base
        A__ = attention_dropout
        A__ = hidden_dropout
        A__ = classifier_dropout
        A__ = initializer_range
        A__ = layer_norm_eps
        A__ = use_cache
        A__ = tie_word_embeddings
        A__ = use_parallel_residual
        A__ = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )

    def __A ( self ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , UpperCAmelCase__ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                F"""got {self.rope_scaling}""" )
        A__ = self.rope_scaling.get("type" , UpperCAmelCase__ )
        A__ = self.rope_scaling.get("factor" , UpperCAmelCase__ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
198
0
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


__lowerCamelCase = [
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
    {'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
    {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
    {'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
    {'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]


def UpperCAmelCase__ ( UpperCAmelCase__=True ) -> int:
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__SCREAMING_SNAKE_CASE ) )
class A__ ( __SCREAMING_SNAKE_CASE ):
    lowercase = None
    lowercase = None

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
        '''simple docstring'''
        with TemporaryDirectory() as tmp_dir:
            A_ = dataset_module_factory(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ )
            A_ = import_main_class(dataset_module.module_path , dataset=UpperCAmelCase_ )
            A_ = builder_cls(
                cache_dir=UpperCAmelCase_ , config_name=UpperCAmelCase_ , hash=dataset_module.hash , )
            A_ = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=UpperCAmelCase_ ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            A_ = cached_path(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ )
            self.assertTrue(os.path.exists(UpperCAmelCase_ ) )


@pytest.mark.integration
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
    A_ = tmp_path_factory.mktemp("""test_hf_gcp""" ) / "test_wikipedia_simple"
    A_ = dataset_module_factory("""wikipedia""", cache_dir=__a )
    A_ = import_main_class(dataset_module.module_path )
    A_ = builder_cls(
        cache_dir=__a, config_name="""20220301.frr""", hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    A_ = None
    builder_instance.download_and_prepare()
    A_ = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[str]:
    A_ = dataset_module_factory("""wikipedia""", cache_dir=__a )
    A_ = import_main_class(dataset_module.module_path, dataset=__a )
    A_ = builder_cls(
        cache_dir=__a, config_name="""20220301.frr""", hash=dataset_module.hash, )
    A_ = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(__a, __a )
    assert "train" in ds
    assert isinstance(ds["""train"""], __a )
    assert next(iter(ds["""train"""] ) )
162
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    lowercase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    lowercase_ = Features({"image": Image()} )
    lowercase_ = Features({"labels": ClassLabel} )
    lowercase_ = "image"
    lowercase_ = "labels"

    def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Tuple:
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column] , UpperCAmelCase_):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
        lowerCamelCase__: List[Any] =copy.deepcopy(self)
        lowerCamelCase__: Optional[int] =self.label_schema.copy()
        lowerCamelCase__: int =features[self.label_column]
        lowerCamelCase__: int =label_schema
        return task_template

    @property
    def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
10
0
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class _lowercase ( lowerCAmelCase, unittest.TestCase ):
    """simple docstring"""

    __A = DiTPipeline
    __A = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    __A = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    __A = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    __A = False

    def UpperCamelCase_ (self ):
        """simple docstring"""
        torch.manual_seed(0 )
        a = TransformeraDModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase_ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=lowerCamelCase_ , )
        a = AutoencoderKL()
        a = DDIMScheduler()
        a = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=0 ):
        """simple docstring"""
        if str(lowerCamelCase_ ).startswith("mps" ):
            a = torch.manual_seed(lowerCamelCase_ )
        else:
            a = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
        a = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def UpperCamelCase_ (self ):
        """simple docstring"""
        a = "cpu"
        a = self.get_dummy_components()
        a = self.pipeline_class(**lowerCamelCase_ )
        pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        a = self.get_dummy_inputs(lowerCamelCase_ )
        a = pipe(**lowerCamelCase_ ).images
        a = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        a = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
        a = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(lowerCamelCase_ , 1E-3 )

    def UpperCamelCase_ (self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase_ , expected_max_diff=1E-3 )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,
        reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def UpperCamelCase_ (self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )


@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
    """simple docstring"""

    def UpperCamelCase_ (self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ (self ):
        """simple docstring"""
        a = torch.manual_seed(0 )
        a = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
        pipe.to("cuda" )

        a = ["vase", "umbrella", "white shark", "white wolf"]
        a = pipe.get_label_ids(lowerCamelCase_ )

        a = pipe(lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=40 , output_type="np" ).images

        for word, image in zip(lowerCamelCase_ , lowerCamelCase_ ):
            a = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-2

    def UpperCamelCase_ (self ):
        """simple docstring"""
        a = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
        a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to("cuda" )

        a = ["vase", "umbrella"]
        a = pipe.get_label_ids(lowerCamelCase_ )

        a = torch.manual_seed(0 )
        a = pipe(lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=25 , output_type="np" ).images

        for word, image in zip(lowerCamelCase_ , lowerCamelCase_ ):
            a = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                F'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-1
356
import os


def a( ) -> List[str]:
    """simple docstring"""
    with open(os.path.dirname(A ) + "/grid.txt" ) as f:
        a = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(A ) for x in f.readline().split()] )

    a = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            a = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                a = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            a = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                a = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            a = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                a = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            a = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                a = temp
    return maximum


if __name__ == "__main__":
    print(solution())
71
0