code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def UpperCAmelCase_ ( UpperCAmelCase__ ): # Make sure the supplied data is a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(UpperCAmelCase__ ) lowercase_ = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data ) lowercase_ = len(UpperCAmelCase__ ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase_ = B"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6) else: lowercase_ = B"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode() + padding ) def UpperCAmelCase_ ( UpperCAmelCase__ ): # Make sure encoded_data is either a string or a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = ( """argument should be a bytes-like object or ASCII string, """ F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(UpperCAmelCase__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): try: lowercase_ = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) lowercase_ = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." 
else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase_ = encoded_data[:-padding] lowercase_ = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase_ = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data ) lowercase_ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(UpperCAmelCase__ ) , 8 ) ] return bytes(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
650
def UpperCAmelCase_ ( UpperCAmelCase__ ): if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise TypeError("""Input value must be an 'int' type""" ) lowercase_ = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
650
1
from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json' # See all FNet models at https://huggingface.co/models?filter=fnet } class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : int = 'fnet' def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=32_000 , UpperCamelCase__ : List[str]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Optional[Any]=3_072 , UpperCamelCase__ : str="gelu_new" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Union[str, Any]=1e-12 , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : Tuple=2 , **UpperCamelCase__ : Dict , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowercase_ = vocab_size lowercase_ = max_position_embeddings lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = initializer_range lowercase_ = type_vocab_size lowercase_ = layer_norm_eps lowercase_ = use_tpu_fourier_optimizations lowercase_ = tpu_short_seq_length
650
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ): @register_to_config def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ): '''simple docstring''' super().__init__() lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = False lowercase_ = nn.Dropout(p=UpperCamelCase__ ) lowercase_ = TaConfig( vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , ) lowercase_ = nn.ModuleList() for lyr_num in range(UpperCamelCase__ ): lowercase_ = TaBlock(UpperCamelCase__ ) self.encoders.append(UpperCamelCase__ ) lowercase_ = TaLayerNorm(UpperCamelCase__ ) lowercase_ = nn.Dropout(p=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = self.token_embedder(UpperCamelCase__ ) lowercase_ = encoder_input_tokens.shape[1] lowercase_ = torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device ) x += self.position_encoding(UpperCamelCase__ ) lowercase_ = self.dropout_pre(UpperCamelCase__ ) # inverted the attention mask lowercase_ = encoder_input_tokens.size() lowercase_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ ) for lyr in 
self.encoders: lowercase_ = lyr(UpperCamelCase__ , UpperCamelCase__ )[0] lowercase_ = self.layer_norm(UpperCamelCase__ ) return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
650
1
import socket def UpperCAmelCase_ ( ): lowercase_ = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) lowercase_ = socket.gethostname() lowercase_ = 1_2_3_1_2 sock.connect((host, port) ) sock.send(B"""Hello server!""" ) with open("""Received_file""" , """wb""" ) as out_file: print("""File opened""" ) print("""Receiving data...""" ) while True: lowercase_ = sock.recv(1_0_2_4 ) if not data: break out_file.write(UpperCAmelCase__ ) print("""Successfully received the file""" ) sock.close() print("""Connection closed""" ) if __name__ == "__main__": main()
650
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar a = TypeVar('T') class UpperCamelCase__ ( Generic[T] ): __SCREAMING_SNAKE_CASE : deque[T] # Cache store of keys __SCREAMING_SNAKE_CASE : set[T] # References of the keys in cache __SCREAMING_SNAKE_CASE : int = 10 # Maximum capacity of cache def __init__( self : str , UpperCamelCase__ : int ): '''simple docstring''' lowercase_ = deque() lowercase_ = set() if not n: lowercase_ = sys.maxsize elif n < 0: raise ValueError("""n should be an integer greater than 0.""" ) else: lowercase_ = n def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : T ): '''simple docstring''' if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: lowercase_ = self.dq_store.pop() self.key_reference.remove(UpperCamelCase__ ) else: self.dq_store.remove(UpperCamelCase__ ) self.dq_store.appendleft(UpperCamelCase__ ) self.key_reference.add(UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' for k in self.dq_store: print(UpperCamelCase__ ) def __repr__( self : Optional[Any] ): '''simple docstring''' return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}''' if __name__ == "__main__": import doctest doctest.testmod() a = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
650
1
from __future__ import annotations a = [True] * 1_0_0_0_0_0_1 a = 2 while i * i <= 1_0_0_0_0_0_0: if seive[i]: for j in range(i * i, 1_0_0_0_0_0_1, i): a = False i += 1 def UpperCAmelCase_ ( UpperCAmelCase__ ): return seive[n] def UpperCAmelCase_ ( UpperCAmelCase__ ): return any(digit in """02468""" for digit in str(UpperCAmelCase__ ) ) def UpperCAmelCase_ ( UpperCAmelCase__ = 1_0_0_0_0_0_0 ): lowercase_ = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(UpperCAmelCase__ ) and not contains_an_even_digit(UpperCAmelCase__ ): lowercase_ = str(UpperCAmelCase__ ) lowercase_ = [int(str_num[j:] + str_num[:j] ) for j in range(len(UpperCAmelCase__ ) )] if all(is_prime(UpperCAmelCase__ ) for i in list_nums ): result.append(UpperCAmelCase__ ) return result def UpperCAmelCase_ ( ): return len(find_circular_primes() ) if __name__ == "__main__": print(F'''{len(find_circular_primes()) = }''')
650
def UpperCAmelCase_ ( UpperCAmelCase__ ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
650
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
650
def UpperCAmelCase_ ( UpperCAmelCase__=2_8_1_2_3 ): lowercase_ = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i lowercase_ = set() lowercase_ = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(UpperCAmelCase__ ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
650
1
def UpperCAmelCase_ ( UpperCAmelCase__ ): if length <= 0 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError("""Length must be a positive integer.""" ) return [n * (2 * n - 1) for n in range(UpperCAmelCase__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=1_0))
650
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class UpperCamelCase__ : def __init__( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=None , ): '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_input_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = num_labels lowercase_ = num_choices lowercase_ = 
scope def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ = None if self.use_input_mask: lowercase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ = None lowercase_ = None lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , ) def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ): '''simple docstring''' lowercase_ = OpenLlamaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ): '''simple docstring''' lowercase_ = True lowercase_ = OpenLlamaModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ): '''simple docstring''' lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ): '''simple docstring''' lowercase_ = True lowercase_ = True lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ ) 
model.to(UpperCamelCase__ ) model.eval() # first forward pass lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , ) lowercase_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 ) lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0] lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0] # select random slice lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , 
unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : List[Any] = ( { 'feature-extraction': OpenLlamaModel, 'text-classification': OpenLlamaForSequenceClassification, 'text-generation': OpenLlamaForCausalLM, 'zero-shot': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Optional[int] = False def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ = OpenLlamaModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = 3 lowercase_ = input_dict["""input_ids"""] lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ ) lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, 
self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = 3 lowercase_ = """single_label_classification""" lowercase_ = input_dict["""input_ids"""] lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ ) lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = 3 lowercase_ = """multi_label_classification""" lowercase_ = input_dict["""input_ids"""] lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ ) lowercase_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = ids_tensor([1, 10] , config.vocab_size ) lowercase_ = ids_tensor([1, 
int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase_ = OpenLlamaModel(UpperCamelCase__ ) original_model.to(UpperCamelCase__ ) original_model.eval() lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase_ = {"""type""": scaling_type, """factor""": 10.0} lowercase_ = OpenLlamaModel(UpperCamelCase__ ) scaled_model.to(UpperCamelCase__ ) scaled_model.eval() lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
650
1
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : str = ['vqvae'] def __init__( self : Union[str, Any] , UpperCamelCase__ : AutoencoderKL , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : Mel , UpperCamelCase__ : Union[DDIMScheduler, DDPMScheduler] , ): '''simple docstring''' super().__init__() self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , mel=UpperCamelCase__ , vqvae=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' return 50 if isinstance(self.scheduler , UpperCamelCase__ ) else 1_000 @torch.no_grad() def __call__( self : List[Any] , UpperCamelCase__ : int = 1 , UpperCamelCase__ : str = None , UpperCamelCase__ : np.ndarray = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : int = 0 , UpperCamelCase__ : int = None , UpperCamelCase__ : torch.Generator = None , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 0 , UpperCamelCase__ : torch.Generator = None , UpperCamelCase__ : float = 0 , UpperCamelCase__ : torch.Tensor = None , UpperCamelCase__ : torch.Tensor = None , UpperCamelCase__ : Union[str, Any]=True , ): '''simple docstring''' lowercase_ = steps or self.get_default_steps() self.scheduler.set_timesteps(UpperCamelCase__ ) lowercase_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: lowercase_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowercase_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , 
generator=UpperCamelCase__ , device=self.device , ) lowercase_ = noise lowercase_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = self.mel.audio_slice_to_image(UpperCamelCase__ ) lowercase_ = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) lowercase_ = (input_image / 255) * 2 - 1 lowercase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: lowercase_ = self.vqvae.encode(torch.unsqueeze(UpperCamelCase__ , 0 ) ).latent_dist.sample( generator=UpperCamelCase__ )[0] lowercase_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowercase_ = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , self.scheduler.timesteps[start_step - 1] ) lowercase_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowercase_ = int(mask_start_secs * pixels_per_second ) lowercase_ = int(mask_end_secs * pixels_per_second ) lowercase_ = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , UpperCamelCase__ ): lowercase_ = self.unet(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )["""sample"""] else: lowercase_ = self.unet(UpperCamelCase__ , UpperCamelCase__ )["""sample"""] if isinstance(self.scheduler , UpperCamelCase__ ): lowercase_ = self.scheduler.step( model_output=UpperCamelCase__ , timestep=UpperCamelCase__ , sample=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , )["""prev_sample"""] else: lowercase_ = self.scheduler.step( model_output=UpperCamelCase__ , timestep=UpperCamelCase__ , sample=UpperCamelCase__ , generator=UpperCamelCase__ , )["""prev_sample"""] if mask is not None: if mask_start > 0: 
lowercase_ = mask[:, step, :, :mask_start] if mask_end > 0: lowercase_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowercase_ = 1 / self.vqvae.config.scaling_factor * images lowercase_ = self.vqvae.decode(UpperCamelCase__ )["""sample"""] lowercase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowercase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() lowercase_ = (images * 255).round().astype("""uint8""" ) lowercase_ = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(UpperCamelCase__ , mode="""RGB""" ).convert("""L""" ) for _ in images) ) lowercase_ = [self.mel.image_to_audio(UpperCamelCase__ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(UpperCamelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCamelCase__ ) ) @torch.no_grad() def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : List[Image.Image] , UpperCamelCase__ : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler , UpperCamelCase__ ) self.scheduler.set_timesteps(UpperCamelCase__ ) lowercase_ = np.array( [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) lowercase_ = (sample / 255) * 2 - 1 lowercase_ = torch.Tensor(UpperCamelCase__ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): lowercase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowercase_ = self.scheduler.alphas_cumprod[t] lowercase_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowercase_ = 1 - alpha_prod_t lowercase_ = self.unet(UpperCamelCase__ , UpperCamelCase__ )["""sample"""] lowercase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowercase_ = (sample - pred_sample_direction) * 
alpha_prod_t_prev ** (-0.5) lowercase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCAmelCase__ ( UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : float ): '''simple docstring''' lowercase_ = acos(torch.dot(torch.flatten(UpperCamelCase__ ) , torch.flatten(UpperCamelCase__ ) ) / torch.norm(UpperCamelCase__ ) / torch.norm(UpperCamelCase__ ) ) return sin((1 - alpha) * theta ) * xa / sin(UpperCamelCase__ ) + sin(alpha * theta ) * xa / sin(UpperCamelCase__ )
650
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: a = False a = logging.get_logger(__name__) a = 'ybelkada/fonts' def UpperCAmelCase_ ( ): if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ''' """Pix2StructImageProcessor. 
Please upgrade torch.""" ) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): requires_backends(UpperCAmelCase__ , ["""torch"""] ) _check_torch_version() lowercase_ = image_tensor.unsqueeze(0 ) lowercase_ = torch.nn.functional.unfold(UpperCAmelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) lowercase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase__ , UpperCAmelCase__ , -1 ) lowercase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = 3_6 , UpperCAmelCase__ = "black" , UpperCAmelCase__ = "white" , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , ): requires_backends(UpperCAmelCase__ , """vision""" ) # Add new lines so that each line is no more than 80 characters. lowercase_ = textwrap.TextWrapper(width=8_0 ) lowercase_ = wrapper.wrap(text=UpperCAmelCase__ ) lowercase_ = """\n""".join(UpperCAmelCase__ ) if font_bytes is not None and font_path is None: lowercase_ = io.BytesIO(UpperCAmelCase__ ) elif font_path is not None: lowercase_ = font_path else: lowercase_ = hf_hub_download(UpperCAmelCase__ , """Arial.TTF""" ) lowercase_ = ImageFont.truetype(UpperCAmelCase__ , encoding="""UTF-8""" , size=UpperCAmelCase__ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. lowercase_ = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , UpperCAmelCase__ ) ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = temp_draw.textbbox((0, 0) , UpperCAmelCase__ , UpperCAmelCase__ ) # Create the actual image with a bit of padding around the text. 
lowercase_ = text_width + left_padding + right_padding lowercase_ = text_height + top_padding + bottom_padding lowercase_ = Image.new("""RGB""" , (image_width, image_height) , UpperCAmelCase__ ) lowercase_ = ImageDraw.Draw(UpperCAmelCase__ ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase__ , fill=UpperCAmelCase__ , font=UpperCAmelCase__ ) return image def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ): requires_backends(UpperCAmelCase__ , """vision""" ) # Convert to PIL image if necessary lowercase_ = to_pil_image(UpperCAmelCase__ ) lowercase_ = render_text(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase_ = max(header_image.width , image.width ) lowercase_ = int(image.height * (new_width / image.width) ) lowercase_ = int(header_image.height * (new_width / header_image.width) ) lowercase_ = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary lowercase_ = to_numpy_array(UpperCAmelCase__ ) if infer_channel_dimension_format(UpperCAmelCase__ ) == ChannelDimension.LAST: lowercase_ = to_channel_dimension_format(UpperCAmelCase__ , ChannelDimension.LAST ) return new_image class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Tuple = ['flattened_patches'] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 2_048 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' super().__init__(**UpperCamelCase__ ) lowercase_ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} lowercase_ = do_normalize lowercase_ = do_convert_rgb lowercase_ = max_patches lowercase_ = is_vqa def UpperCAmelCase__ ( self : 
Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : dict , **UpperCamelCase__ : Optional[int] ): '''simple docstring''' requires_backends(self.extract_flattened_patches , """torch""" ) _check_torch_version() # convert to torch lowercase_ = to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.FIRST ) lowercase_ = torch.from_numpy(UpperCamelCase__ ) lowercase_ , lowercase_ = patch_size["""height"""], patch_size["""width"""] lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ ) # maximize scale s.t. lowercase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) lowercase_ = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase__ ) , 1 ) lowercase_ = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase__ ) , 1 ) lowercase_ = max(num_feasible_rows * patch_height , 1 ) lowercase_ = max(num_feasible_cols * patch_width , 1 ) lowercase_ = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=UpperCamelCase__ , antialias=UpperCamelCase__ , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] lowercase_ = torch_extract_patches(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = patches.shape lowercase_ = patches_shape[1] lowercase_ = patches_shape[2] lowercase_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] lowercase_ = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] lowercase_ = torch.arange(UpperCamelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase__ ).reshape([rows * columns, 1] ) lowercase_ = torch.arange(UpperCamelCase__ ).reshape([1, columns] ).repeat(UpperCamelCase__ , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. 
# [rows * columns, 1] lowercase_ = row_ids.to(torch.floataa ) lowercase_ = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] lowercase_ = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] lowercase_ = torch.nn.functional.pad(UpperCamelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float() lowercase_ = to_numpy_array(UpperCamelCase__ ) return result def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict ): '''simple docstring''' if image.dtype == np.uinta: lowercase_ = image.astype(np.floataa ) # take mean across the whole `image` lowercase_ = np.mean(UpperCamelCase__ ) lowercase_ = np.std(UpperCamelCase__ ) lowercase_ = max(UpperCamelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCAmelCase__ ( self : str , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ): '''simple docstring''' lowercase_ = do_normalize if do_normalize is not None else self.do_normalize lowercase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase_ = patch_size if patch_size is not None else self.patch_size lowercase_ = max_patches if max_patches is not None else self.max_patches lowercase_ = self.is_vqa if kwargs.get("""data_format""" , UpperCamelCase__ ) is not None: raise ValueError("""data_format is not an accepted input as the outputs are """ ) lowercase_ = 
make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase_ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images] if is_vqa: if header_text is None: raise ValueError("""A header text must be provided for VQA models.""" ) lowercase_ = kwargs.pop("""font_bytes""" , UpperCamelCase__ ) lowercase_ = kwargs.pop("""font_path""" , UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowercase_ = [header_text] * len(UpperCamelCase__ ) lowercase_ = [ render_header(UpperCamelCase__ , header_text[i] , font_bytes=UpperCamelCase__ , font_path=UpperCamelCase__ ) for i, image in enumerate(UpperCamelCase__ ) ] if do_normalize: lowercase_ = [self.normalize(image=UpperCamelCase__ ) for image in images] # convert to torch tensor and permute lowercase_ = [ self.extract_flattened_patches(image=UpperCamelCase__ , max_patches=UpperCamelCase__ , patch_size=UpperCamelCase__ ) for image in images ] # create attention mask in numpy lowercase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] lowercase_ = BatchFeature( data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=UpperCamelCase__ ) return encoded_outputs
650
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = '▁' a = {'vocab_file': 'sentencepiece.bpe.model'} a = { 'vocab_file': { 'facebook/mbart-large-50-one-to-many-mmt': ( 'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model' ), } } a = { 'facebook/mbart-large-50-one-to-many-mmt': 1_0_2_4, } # fmt: off a = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI'] class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask'] __SCREAMING_SNAKE_CASE : List[int] = [] __SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Optional[Any]="<s>" , UpperCamelCase__ : List[Any]="<unk>" , UpperCamelCase__ : List[str]="<pad>" , UpperCamelCase__ : Dict="<mask>" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : List[str] , ): '''simple docstring''' lowercase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if 
isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs lowercase_ = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase__ ) ) lowercase_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowercase_ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase_ = 1 lowercase_ = len(self.sp_model ) lowercase_ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase__ ) } lowercase_ = {v: k for k, v in self.lang_code_to_id.items()} lowercase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowercase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowercase_ = src_lang if src_lang is not None else """en_XX""" lowercase_ = self.lang_code_to_id[self._src_lang] lowercase_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' return self._src_lang @src_lang.setter def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Dict ): '''simple docstring''' lowercase_ = self.__dict__.copy() lowercase_ = None return state def __setstate__( self : Union[str, Any] , UpperCamelCase__ : Dict ): '''simple docstring''' lowercase_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase_ = {} lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def 
UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str ): '''simple docstring''' return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase_ = self.sp_model.PieceToId(UpperCamelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : int ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' lowercase_ = [] lowercase_ = """""" lowercase_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase__ ) + token lowercase_ = True lowercase_ = [] else: current_sub_tokens.append(UpperCamelCase__ ) lowercase_ = False out_string += self.sp_model.decode(UpperCamelCase__ ) return out_string.strip() def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ = os.path.join( UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , """wb""" ) as fi: lowercase_ = 
self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,) def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) lowercase_ = [1] * len(self.prefix_tokens ) lowercase_ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(UpperCamelCase__ )) + suffix_ones return prefix_ones + ([0] * len(UpperCamelCase__ )) + ([0] * len(UpperCamelCase__ )) + suffix_ones def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : Dict ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) lowercase_ = src_lang lowercase_ = self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) lowercase_ = self.convert_tokens_to_ids(UpperCamelCase__ ) lowercase_ = tgt_lang_id return inputs def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "en_XX" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "ro_RO" , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' lowercase_ = src_lang 
lowercase_ = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase__ ( self : str , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = self.lang_code_to_id[src_lang] lowercase_ = [self.cur_lang_code_id] lowercase_ = [self.eos_token_id] def UpperCAmelCase__ ( self : int , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = self.lang_code_to_id[tgt_lang] lowercase_ = [self.cur_lang_code_id] lowercase_ = [self.eos_token_id]
650
import cva import numpy as np class UpperCamelCase__ : def __init__( self : List[str] , UpperCamelCase__ : float , UpperCamelCase__ : int ): '''simple docstring''' if k in (0.04, 0.06): lowercase_ = k lowercase_ = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Optional[int] ): '''simple docstring''' return str(self.k ) def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = cva.imread(UpperCamelCase__ , 0 ) lowercase_ , lowercase_ = img.shape lowercase_ = [] lowercase_ = img.copy() lowercase_ = cva.cvtColor(UpperCamelCase__ , cva.COLOR_GRAY2RGB ) lowercase_ , lowercase_ = np.gradient(UpperCamelCase__ ) lowercase_ = dx**2 lowercase_ = dy**2 lowercase_ = dx * dy lowercase_ = 0.04 lowercase_ = self.window_size // 2 for y in range(UpperCamelCase__ , h - offset ): for x in range(UpperCamelCase__ , w - offset ): lowercase_ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = (wxx * wyy) - (wxy**2) lowercase_ = wxx + wyy lowercase_ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": a = HarrisCorner(0.04, 3) a , a = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
650
1
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging a = logging.get_logger(__name__) class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : str = ['pixel_values'] def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase__ ) lowercase_ = do_rescale lowercase_ = rescale_factor lowercase_ = do_pad lowercase_ = pad_size def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ): '''simple docstring''' return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ): '''simple docstring''' lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ ) lowercase_ = (old_height // size + 1) * size - old_height lowercase_ = (old_width // size + 1) * size - old_width return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , 
UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ): '''simple docstring''' lowercase_ = do_rescale if do_rescale is not None else self.do_rescale lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase_ = do_pad if do_pad is not None else self.do_pad lowercase_ = pad_size if pad_size is not None else self.pad_size lowercase_ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_rescale: lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_pad: lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] lowercase_ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
650
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'): a = { 'linear': PIL.Image.Resampling.BILINEAR, 'bilinear': PIL.Image.Resampling.BILINEAR, 'bicubic': PIL.Image.Resampling.BICUBIC, 'lanczos': PIL.Image.Resampling.LANCZOS, 'nearest': PIL.Image.Resampling.NEAREST, } else: a = { 'linear': PIL.Image.LINEAR, 'bilinear': PIL.Image.BILINEAR, 'bicubic': PIL.Image.BICUBIC, 'lanczos': PIL.Image.LANCZOS, 'nearest': PIL.Image.NEAREST, } def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowercase_ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowercase_ = numpy_to_pil(UpperCAmelCase__ ) return images def UpperCAmelCase_ ( UpperCAmelCase__ ): if images.ndim == 3: lowercase_ = images[None, ...] lowercase_ = (images * 2_5_5).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowercase_ = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images] else: lowercase_ = [Image.fromarray(UpperCAmelCase__ ) for image in images] return pil_images
650
1
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() a = logging.get_logger(__name__) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = WavaVecaForSequenceClassification.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ ) lowercase_ = downstream_dict["""projector.weight"""] lowercase_ = downstream_dict["""projector.bias"""] lowercase_ = downstream_dict["""model.post_net.linear.weight"""] lowercase_ = downstream_dict["""model.post_net.linear.bias"""] return model def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = WavaVecaForAudioFrameClassification.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ ) lowercase_ = downstream_dict["""model.linear.weight"""] lowercase_ = downstream_dict["""model.linear.bias"""] return model def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = WavaVecaForXVector.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ ) lowercase_ = downstream_dict["""connector.weight"""] lowercase_ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): lowercase_ = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] lowercase_ = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] lowercase_ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] lowercase_ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] lowercase_ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] lowercase_ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] lowercase_ = downstream_dict["""objective.W"""] return model @torch.no_grad() def UpperCAmelCase_ ( 
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = torch.load(UpperCAmelCase__ , map_location="""cpu""" ) lowercase_ = checkpoint["""Downstream"""] lowercase_ = WavaVecaConfig.from_pretrained(UpperCAmelCase__ ) lowercase_ = WavaVecaFeatureExtractor.from_pretrained( UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ ) lowercase_ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): lowercase_ = convert_classification(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) elif arch.endswith("""ForAudioFrameClassification""" ): lowercase_ = convert_diarization(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) elif arch.endswith("""ForXVector""" ): lowercase_ = convert_xvector(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: lowercase_ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(UpperCAmelCase__ ) hf_model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') a = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
650
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCamelCase__(SchedulerCommonTest):
    """Tests for ``UnCLIPScheduler``: config handling, variance values, and full denoising loops."""

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via keyword arguments."""
        config = {
            "num_train_timesteps": 1_000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        # prev_timestep must be strictly earlier than time_step to be valid.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        """Check reference variance values for the fixed_small_log variance type."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_549_625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9_994_987)) < 1e-5

    def test_variance_learned_range(self):
        """Check reference (log) variance values for the learned_range variance type."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0_010_011 < 1e-5

    def test_full_loop(self):
        """Run the full denoising loop over all training timesteps and check the output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2_682_495) < 1e-2
        assert abs(result_mean.item() - 0.3_284_743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        """Run a shortened 25-step loop, passing explicit prev_timestep values to ``step``."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2_044_983) < 1e-2
        assert abs(result_mean.item() - 0.3_362_038) < 1e-3

    def test_trained_betas(self):
        # Not applicable to UnCLIPScheduler; override the common test as a no-op.
        pass

    def test_add_noise_device(self):
        # Not applicable to UnCLIPScheduler; override the common test as a no-op.
        pass
650
1
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    """Builds small SwiftFormer configs/inputs and runs shape checks for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1_000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels are None unless use_labels is set."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Backbone output is (batch, last embed dim, 7, 7) for 224x224 inputs.
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        # Also check the unlabeled forward pass.
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for SwiftFormer."""

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)

            # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            """Return a deep copy of ``config`` with all init scales zeroed (recursively)."""
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    """Load the shared COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
650
"""Masked-image-modeling (SimMIM-style) pretraining script.

Loads an image dataset, applies SimMIM augmentations plus random patch masking,
and trains an ``AutoModelForMaskedImageModeling`` with the HF ``Trainer``.
"""

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Assemble the data_files mapping consumed by datasets.load_dataset.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generates a random boolean patch mask for SimMIM-style pretraining.

    The image is divided into ``mask_patch_size`` squares; ``mask_ratio`` of them are
    masked, and the mask is upsampled to the model's patch grid.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")

        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        # rand_size: mask-patch grid edge; scale: upsample factor to the model patch grid.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        """Return a flat 0/1 tensor over the model patch grid with ``mask_count`` patches masked."""
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    """Stack the transformed pixel values and boolean masks into a model-ready batch."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Apply SimMIM augmentations and attach a fresh random mask per example."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]

        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
650
1
# Conversion script: Megatron-DeepSpeed BLOOM checkpoint -> Hugging Face
# `transformers` BLOOM checkpoint, optionally written as sharded weight files.
#
# NOTE(review): this file has been mechanically renamed at some point —
# definitions are bound to placeholder names (`a`, `lowercase_`,
# `UpperCAmelCase_`, `UpperCAmelCase__`) while later references still use the
# original identifiers (`layer_rename_map`, `key`, `WEIGHTS_TO_AVERAGE_ENDSWITH`,
# `tensors`, `parser`, `convert_bloom_checkpoint_to_pytorch`, ...), which are
# never bound.  As written the module cannot run; the comments below describe
# the evident intent.  TODO: restore the original identifiers.

import argparse
import json
import os
import re

import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging

logging.set_verbosity_info()

# Intended name (per later references): WEIGHTS_TO_AVERAGE_ENDSWITH — suffixes
# of weights that are replicated across tensor-parallel (TP) ranks and must be
# averaged when merging.
a = [
    'word_embeddings_layernorm.weight',
    'word_embeddings_layernorm.bias',
    'input_layernorm.weight',
    'input_layernorm.bias',
    'post_attention_layernorm.weight',
    'post_attention_layernorm.bias',
    'self_attention.dense.bias',
    'mlp.dense_4h_to_h.bias',
    'ln_f.weight',
    'ln_f.bias',
]
# Intended name: WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN — weights sharded along
# dim 1 (RowParallelLinear); everything else is concatenated along dim 0.
a = [
    'mlp.dense_4h_to_h.weight',
    'self_attention.dense.weight',
]


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__):
    # Evidently `layer_name_mapping(key, file)`: map a Megatron layer key to
    # its `transformers` name; transformer blocks become "h.<n>." + key.
    # NOTE(review): `key`, `layer_rename_map` and `layer_number` below are
    # the pre-rename identifiers and are unbound in this version.
    lowercase_ = {
        """word_embeddings.weight""": """word_embeddings.weight""",
        """word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
        """word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
        """weight""": """ln_f.weight""",
        """bias""": """ln_f.bias""",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    lowercase_ = int(re.match(r""".*layer_(\d*).*""", UpperCAmelCase__)[1])
    # the dumped layer numbering starts at 3 (embeddings occupy earlier slots)
    layer_number -= 3
    return F'''h.{layer_number}.''' + key


def UpperCAmelCase_(UpperCAmelCase__):
    # Evidently `get_dtype_size(dtype)`: bytes per element of a torch dtype,
    # parsed from the trailing bit-width of its string form (bool -> 1/8).
    if dtype == torch.bool:
        return 1 / 8
    lowercase_ = re.search(r"""[^\d](\d+)$""", str(UpperCAmelCase__))
    if bit_search is None:
        raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''')
    lowercase_ = int(bit_search.groups()[0])
    return bit_size // 8


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__):
    # Evidently `convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path,
    # bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp)`.
    # Construct model
    if bloom_config_file == "":
        lowercase_ = BloomConfig()
    else:
        lowercase_ = BloomConfig.from_json_file(UpperCAmelCase__)

    if shard_model:
        # Sharded path: merge the TP shards of each layer file, save one
        # output bin per layer file, and build a weight-map index JSON.
        lowercase_ = os.listdir(UpperCAmelCase__)
        # only "model_00" files are enumerated; the inner loop loads the
        # remaining TP ranks by substituting the rank into the file name
        lowercase_ = sorted(filter(lambda UpperCAmelCase__: s.startswith("""layer""") and "model_00" in s, UpperCAmelCase__))
        lowercase_ = {"""weight_map""": {}, """metadata""": {}}
        lowercase_ = 0
        lowercase_ = None
        lowercase_ = BloomConfig()
        for j, file in enumerate(UpperCAmelCase__):
            print("""Processing file: {}""".format(UpperCAmelCase__))
            lowercase_ = None
            for i in range(UpperCAmelCase__):
                # load all TP files
                lowercase_ = file.replace("""model_00""", F'''model_0{i}''')
                lowercase_ = torch.load(os.path.join(UpperCAmelCase__, UpperCAmelCase__), map_location="""cpu""")
                # Rename keys in the transformers names
                lowercase_ = list(temp.keys())
                # NOTE(review): upstream re-inserts under the mapped name
                # (temp[layer_name_mapping(key, file)] = temp.pop(key));
                # here the popped value is discarded, emptying `temp`.
                for key in keys:
                    lowercase_ = temp.pop(UpperCAmelCase__)
                if tensors is None:
                    lowercase_ = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(UpperCAmelCase__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            lowercase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            lowercase_ = torch.cat([tensors[key], temp[key]], dim=UpperCAmelCase__)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(UpperCAmelCase__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    lowercase_ = tensors[key] / pretraining_tp
            torch.save(
                UpperCAmelCase__,
                os.path.join(
                    UpperCAmelCase__,
                    """pytorch_model_{}-of-{}.bin""".format(str(j + 1).zfill(5), str(len(UpperCAmelCase__)).zfill(5)),
                ),
            )
            # Track total byte size and which shard each key lives in.
            for key in tensors.keys():
                lowercase_ = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    lowercase_ = """pytorch_model_{}-of-{}.bin""".format(
                        str(j + 1).zfill(5), str(len(UpperCAmelCase__)).zfill(5)
                    )

        lowercase_ = BloomConfig()
        lowercase_ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
        lowercase_ = total_size
        with open(UpperCAmelCase__, """w""", encoding="""utf-8""") as f:
            f.write(config.to_json_string())
        with open(os.path.join(UpperCAmelCase__, WEIGHTS_NAME + """.index.json"""), """w""", encoding="""utf-8""") as f:
            lowercase_ = json.dumps(UpperCAmelCase__, indent=2, sort_keys=UpperCAmelCase__) + """\n"""
            f.write(UpperCAmelCase__)
    else:
        # Unsharded path: merge everything into one state dict and load it
        # into a freshly constructed BloomModel, then save model + config.
        lowercase_ = BloomModel(UpperCAmelCase__)
        lowercase_ = os.listdir(UpperCAmelCase__)
        lowercase_ = sorted(filter(lambda UpperCAmelCase__: s.startswith("""layer""") and "model_00" in s, UpperCAmelCase__))
        lowercase_ = None
        # NOTE(review): the outer loop variable `i` is shadowed by the inner
        # TP-rank loop below (harmless here, but confusing).
        for i, file in enumerate(UpperCAmelCase__):
            lowercase_ = None
            for i in range(UpperCAmelCase__):
                # load all TP files
                lowercase_ = file.replace("""model_00""", F'''model_0{i}''')
                lowercase_ = torch.load(os.path.join(UpperCAmelCase__, UpperCAmelCase__), map_location="""cpu""")
                # Rename keys in the transformers names
                lowercase_ = list(temp.keys())
                for key in keys:
                    lowercase_ = temp.pop(UpperCAmelCase__)
                if tensors is None:
                    lowercase_ = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(UpperCAmelCase__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            lowercase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            lowercase_ = torch.cat([tensors[key], temp[key]], dim=UpperCAmelCase__)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(UpperCAmelCase__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    lowercase_ = tensors[key] / pretraining_tp
            # Load each merged layer file non-strictly; missing keys should
            # shrink (intersection) as successive files fill the model in.
            lowercase_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__)
            assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
            if missing_keys is None:
                lowercase_ = set(other_keys.missing_keys)
            else:
                lowercase_ = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, F'''The keys {missing_keys} are missing'''

        # Save pytorch-model
        os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__)
        lowercase_ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
        lowercase_ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
        print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''')
        if config.torch_dtype is not None:
            lowercase_ = model.to(config.torch_dtype)
        torch.save(model.state_dict(), UpperCAmelCase__)
        print(F'''Save configuration file to {pytorch_config_dump_path}''')
        with open(UpperCAmelCase__, """w""", encoding="""utf-8""") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    # NOTE(review): `parser`, `args` and `convert_bloom_checkpoint_to_pytorch`
    # are unbound here for the same renaming reason described at the top.
    a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bloom_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path to the Megatron-LM checkpoint path.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--bloom_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--shard_model',
        action='store_true',
        help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
    )
    parser.add_argument(
        '--pretraining_tp',
        default=4,
        type=int,
        help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
    )
    a = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
650
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """Image processor for LeViT-style models.

    Pipeline: resize (shortest edge scaled by 256/224), center crop, rescale
    and normalize, returning a ``BatchFeature`` with ``pixel_values``.

    NOTE(review): the incoming version of this class was mechanically mangled
    (duplicate parameter names made ``__init__`` a SyntaxError, every method
    shared the same placeholder name, and ``__init__`` discarded its arguments
    into a local instead of setting attributes).  This rewrite restores the
    evident structure; identifiers are inferred from the in-body references
    (``self.resize``, ``self.do_resize``, ...) — confirm against the original
    upstream file.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image.

        If ``size`` has ``shortest_edge``, the shortest edge is resized to
        ``int((256 / 224) * shortest_edge)`` keeping the aspect ratio;
        otherwise the image is resized to ``(height, width)``.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by ``scale`` (typically ``1/255``)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch of images.

        Per-call arguments override the defaults captured in ``__init__``.
        Returns a ``BatchFeature`` with key ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
650
1
# Polynomial regression demo on the "position salaries" dataset.
#
# Fix: the previous version bound every module-level value to the placeholder
# name `a` (overwriting itself) and the plotting function to a placeholder,
# while later references used `dataset`, `X`, `y`, `poly_reg`, `pol_reg` and
# `viz_polymonial` — all unbound.  The evident original names are restored
# from those references so the script actually runs.

import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values  # position level (kept 2-D for sklearn)
y = dataset.iloc[:, 2].values  # salary

# The split is computed for completeness; the model below fits on the full X.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial():
    """Scatter the raw data and overlay the degree-4 polynomial fit."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
650
# Lazy-import bootstrap for the TrOCR sub-package.
#
# Fix: the previous version assigned the import structure and the torch-only
# export list to the placeholder name `a`, while `_LazyModule` was called with
# the unbound name `_import_structure`, and the lazy module itself was bound
# to `a` instead of installed in `sys.modules` — so importing this package
# raised NameError and lazy loading never took effect.  Restored to the
# canonical `transformers` lazy-module pattern implied by those references.

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Maps submodule name -> list of public names it exports.
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}

# Modeling code is only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
650
1
from math import ceil, isqrt, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 173: count square laminae using up to ``limit`` tiles.

    A square lamina is a square outline with a centred square hole.  For each
    outer width we count the hole widths of matching parity whose tile count
    ``outer**2 - hole**2`` stays within ``limit``.

    Fix: the previous version named the function and its parameter with
    placeholders while the body referenced ``limit`` and the ``__main__``
    guard called ``solution()`` — both unbound.  The float
    ``ceil(sqrt(...))`` is also replaced with exact integer arithmetic.

    >>> solution(100)
    41
    """
    answer = 0
    # The thinnest lamina of outer width w uses w**2 - (w - 2)**2 = 4*w - 4
    # tiles, so outer widths beyond limit // 4 + 1 can never fit the budget.
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Smallest legal hole width is ceil(sqrt(outer_width**2 - limit)),
            # computed exactly with isqrt (no float rounding).
            excess = outer_width**2 - limit
            hole_width_lower_bound = isqrt(excess)
            if hole_width_lower_bound * hole_width_lower_bound < excess:
                hole_width_lower_bound += 1
            hole_width_lower_bound = max(hole_width_lower_bound, 1)
        else:
            hole_width_lower_bound = 1
        # The hole must share the outer width's parity to stay centred.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
650
# Conversion script: fairseq wav2vec2 encoder + Speech2Text2 decoder ->
# Hugging Face `SpeechEncoderDecoderModel`, plus tokenizer/feature extractor.
#
# NOTE(review): this file has been mechanically renamed — definitions are
# bound to placeholder names (`a`, `lowercase_`, `UpperCAmelCase_`,
# `UpperCAmelCase__`) while later references still use the original
# identifiers (`MAPPING`, `logger`, `set_recursively`, `load_conv_layer`,
# `recursively_load_weights_wavaveca`, `create_vocab_dict`, `parser`,
# `convert_wavaveca_checkpoint`, ...), which are never bound.  As written the
# module cannot run; comments below describe the evident intent.
# TODO: restore the original identifiers.

import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)

logging.set_verbosity_info()
a = logging.get_logger(__name__)

# Intended name: MAPPING — fairseq wav2vec2 parameter prefixes mapped to
# their `transformers` counterparts ("*" is a per-layer wildcard).
a = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# Intended name: TOP_LEVEL_KEYS (kept for parity with sibling scripts).
a = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__):
    # Evidently `set_recursively(hf_pointer, key, value, full_name,
    # weight_type)`: walk the dotted `key` into the HF module tree, check the
    # shape, then copy `value` into the matching weight/bias slot.
    for attribute in key.split("""."""):
        lowercase_ = getattr(UpperCAmelCase__, UpperCAmelCase__)
    if weight_type is not None:
        lowercase_ = getattr(UpperCAmelCase__, UpperCAmelCase__).shape
    else:
        lowercase_ = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    # NOTE(review): all five branches assign to the same discarded local here;
    # upstream assigns to hf_pointer.weight.data / weight_g / weight_v /
    # bias.data / data respectively.
    if weight_type == "weight":
        lowercase_ = value
    elif weight_type == "weight_g":
        lowercase_ = value
    elif weight_type == "weight_v":
        lowercase_ = value
    elif weight_type == "bias":
        lowercase_ = value
    else:
        lowercase_ = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__):
    # Evidently `recursively_load_weights_wavaveca(fairseq_model, hf_model)`:
    # route every fairseq parameter to the feature extractor, the projection,
    # or a MAPPING-matched encoder slot; returns the encoder->decoder proj.
    lowercase_ = []
    lowercase_ = fairseq_model.state_dict()
    lowercase_ = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    lowercase_ = None
    for name, value in fairseq_dict.items():
        lowercase_ = False
        if "conv_layers" in name:
            load_conv_layer(
                UpperCAmelCase__,
                UpperCAmelCase__,
                UpperCAmelCase__,
                UpperCAmelCase__,
                hf_model.config.feat_extract_norm == """group""",
            )
            lowercase_ = True
        elif name.split(""".""")[0] == "proj":
            lowercase_ = fairseq_model.proj
            lowercase_ = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    lowercase_ = True
                    if "*" in mapped_key:
                        # layer index sits just before the matched key
                        lowercase_ = name.split(UpperCAmelCase__)[0].split(""".""")[-2]
                        lowercase_ = mapped_key.replace("""*""", UpperCAmelCase__)
                    if "weight_g" in name:
                        lowercase_ = """weight_g"""
                    elif "weight_v" in name:
                        lowercase_ = """weight_v"""
                    elif "bias" in name:
                        lowercase_ = """bias"""
                    elif "weight" in name:
                        lowercase_ = """weight"""
                    else:
                        lowercase_ = None
                    set_recursively(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__)
                continue
        if not is_used:
            unused_weights.append(UpperCAmelCase__)
    logger.warning(F'''Unused weights: {unused_weights}''')
    return proj_weight


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__):
    # Evidently `load_conv_layer(full_name, value, feature_extractor,
    # unused_weights, use_group_norm)`: copy one conv / layer-norm parameter
    # of the feature extractor, validating shapes.
    lowercase_ = full_name.split("""conv_layers.""")[-1]
    lowercase_ = name.split(""".""")
    lowercase_ = int(items[0])
    lowercase_ = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            lowercase_ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            lowercase_ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            lowercase_ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            lowercase_ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(UpperCAmelCase__)


def UpperCAmelCase_(UpperCAmelCase__):
    # Evidently `make_linear_from_emb(emb)`: build a bias-free Linear sharing
    # the embedding matrix (weight tying).
    lowercase_, lowercase_ = emb.weight.shape
    lowercase_ = nn.Linear(UpperCAmelCase__, UpperCAmelCase__, bias=UpperCAmelCase__)
    lowercase_ = emb.weight.data
    return lin_layer


def UpperCAmelCase_(UpperCAmelCase__):
    # Evidently `create_vocab_dict(dict_path)`: read a fairseq dict file
    # (token per line) and prepend the four special tokens at ids 0-3.
    with open(UpperCAmelCase__, """r""", encoding="""utf-8""") as f:
        lowercase_ = f.readlines()
    lowercase_ = [line.split(""" """)[0] for line in lines]
    lowercase_ = len(UpperCAmelCase__)
    lowercase_ = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(UpperCAmelCase__, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def UpperCAmelCase_(
    UpperCAmelCase__,
    UpperCAmelCase__,
    UpperCAmelCase__,
    UpperCAmelCase__,
    UpperCAmelCase__,
    UpperCAmelCase__,
    UpperCAmelCase__,
):
    # Evidently `convert_wavaveca_checkpoint(checkpoint_path,
    # pytorch_dump_folder_path, dict_path, encoder_config_path,
    # decoder_config_path, vocab_size, num_decoder_layers)`.
    lowercase_ = WavaVecaConfig.from_pretrained(UpperCAmelCase__)
    lowercase_ = SpeechaTextaConfig.from_pretrained(
        UpperCAmelCase__, vocab_size=UpperCAmelCase__, decoder_layers=UpperCAmelCase__, do_stable_layer_norm=UpperCAmelCase__
    )
    lowercase_ = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=1_6_0_0_0,
        padding_value=0,
        do_normalize=UpperCAmelCase__,
        return_attention_mask=UpperCAmelCase__,
    )
    lowercase_, lowercase_, lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])}
    )
    lowercase_ = model[0].eval()
    # set weights for wav2vec2 encoder
    lowercase_ = WavaVecaModel(UpperCAmelCase__)
    lowercase_ = recursively_load_weights_wavaveca(model.encoder, UpperCAmelCase__)
    lowercase_ = SpeechaTextaForCausalLM(UpperCAmelCase__)
    lowercase_, lowercase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=UpperCAmelCase__)
    # set output linear layer
    unexpected_keys.remove("""embed_out""")
    lowercase_ = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
    lowercase_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__, decoder=UpperCAmelCase__)
    lowercase_ = False
    # add projection layer
    lowercase_ = nn.Parameter(projection_layer.weight)
    lowercase_ = nn.Parameter(projection_layer.bias)
    lowercase_ = create_vocab_dict(UpperCAmelCase__)
    with open(os.path.join(UpperCAmelCase__, """vocab.json"""), """w""") as fp:
        json.dump(UpperCAmelCase__, UpperCAmelCase__)
    lowercase_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase__, """vocab.json"""))
    tokenizer.save_pretrained(UpperCAmelCase__)
    # Stitch the combined encoder/decoder config and persist everything.
    lowercase_ = hf_wavavec.config.to_dict()
    lowercase_ = tokenizer.pad_token_id
    lowercase_ = tokenizer.bos_token_id
    lowercase_ = tokenizer.eos_token_id
    lowercase_ = """speech_to_text_2"""
    lowercase_ = """wav2vec2"""
    lowercase_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__)
    hf_wavavec.save_pretrained(UpperCAmelCase__)
    feature_extractor.save_pretrained(UpperCAmelCase__)


if __name__ == "__main__":
    # NOTE(review): `parser`, `args` and `convert_wavaveca_checkpoint` are
    # unbound here for the same renaming reason described at the top.
    a = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument(
        '--encoder_config_path',
        default='facebook/wav2vec2-large-lv60',
        type=str,
        help='Path to hf encoder wav2vec2 checkpoint config',
    )
    parser.add_argument(
        '--decoder_config_path',
        default='facebook/s2t-small-mustc-en-fr-st',
        type=str,
        help='Path to hf decoder s2t checkpoint config',
    )
    parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
    parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    a = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
650
1
import math
import sys


def UpperCAmelCase_(number):
    """Return the minimum number of perfect squares summing to ``number``.

    Dynamic programming over ``1..number``; Lagrange's four-square theorem
    guarantees the answer is at most 4.  By this script's historical
    convention, an input of 0 returns 1.

    Fixes: ``math.isqrt`` replaces ``int(math.sqrt(i))`` (exact for large
    ints, no float rounding), and integral floats such as ``12.0`` — which
    pass the validation below — are coerced to ``int`` instead of crashing
    in ``range``.

    Raises:
        ValueError: if ``number`` is non-integral or negative.
    """
    if number != int(number):
        raise ValueError("""the value of input must be a natural number""")
    if number < 0:
        raise ValueError("""the value of input must not be a negative number""")
    number = int(number)  # accept integral floats such as 12.0
    if number == 0:
        return 1
    # answers[i] = least count of perfect squares summing to i; answers[0] = 0.
    answers = [0] * (number + 1)
    for i in range(1, number + 1):
        answers[i] = 1 + min(
            answers[i - j * j] for j in range(1, math.isqrt(i) + 1)
        )
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration for ESM models (optionally wrapping an ESMFold config).

    NOTE(review): the incoming version of this file was mechanically mangled
    (duplicate parameter names made ``__init__`` a SyntaxError, ``self.``
    assignments were discarded into a local, and the class/function names
    referenced below — ``EsmFoldConfig``, ``TrunkConfig``,
    ``StructureModuleConfig``, ``get_default_vocab_list`` — were unbound).
    This rewrite restores the evident structure; names are inferred from the
    in-body references and assignment right-hand sides.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, """use_esm_attn_map""", False):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""")

    def to_dict(self):
        """Serialize, expanding the nested ESMFold config when present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    # Nested folding-head configuration; `trunk` is normalized to a
    # TrunkConfig instance in __post_init__.
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): the next two checks compare a value with itself and so
        # can never fire; they look like they were meant to test divisibility
        # by sequence_head_width / pairwise_head_width.  Kept as-is to
        # preserve behavior — the head-count checks below catch the same
        # misconfigurations.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    # Invariant-point-attention structure module hyperparameters.
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary, in id order."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
650
1
from math import factorial

# Factorial of each decimal digit, keyed by its character, so a digit sum
# costs one dict lookup per character instead of a factorial call.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the decimal digits of ``number``.

    :raises TypeError: if ``number`` is not an int
    :raises ValueError: if ``number`` is negative

    >>> digit_factorial_sum(145)
    145
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number to string to iterate on its digits and adds their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Project Euler 74: count starting numbers below ``number_limit`` whose
    digit-factorial chain contains exactly ``chain_length`` non-repeating terms.

    :raises TypeError: if either parameter is not an int
    :raises ValueError: if either parameter is not positive
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # Counter for the chains with the exact desired length.
    chains_counter = 0
    # Cached (possibly truncated) chain lengths of previous starting elements.
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # Elements of the current chain, used to detect a repeated term.
        chain_set = set()
        chain_set_length = 0

        # Stop walking the chain on a cached element, a repeating element,
        # or once the length exceeds the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # If we stopped on an element whose chain length is cached, the rest
        # of this chain is the cached tail.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If the chain contains the exact amount of elements, count it.
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
650
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    """Build the argument parser for the ``accelerate env`` command.

    When ``subparsers`` is given, registers ``env`` as a sub-command and wires
    its default ``func`` to :func:`env_command`; otherwise builds a standalone
    parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    """Collect and print environment information for bug reports.

    Prints a copy-pasteable summary (versions, platform, RAM, accelerator
    availability) plus the accelerate config, and returns the info dict.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    # Fix: probe the *default config file path* on disk, not the parsed-args
    # object itself, when --config_file was not passed explicitly.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        # Record the GPU name instead of discarding it.
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    """CLI entry point: parse arguments and run :func:`env_command`."""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
650
1
# Staging-only tests that push FlaxBertModel checkpoints to the Hugging Face
# Hub (user and organization repos, via push_to_hub and save_pretrained) and
# reload them, asserting reloaded parameters match the originals per key
# (absolute summed difference <= 1e-3), plus sharded/subfolder loading tests.
# NOTE(review): identifiers in this block appear mechanically renamed
# (UpperCamelCase__ / lowercase_, with collisions — e.g. the comparison helper
# flattens `modela.params` twice where two distinct models were intended);
# the code text is therefore left byte-identical and only comments are added.
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel a = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class UpperCamelCase__ ( unittest.TestCase ): @classmethod def UpperCAmelCase__ ( cls : Tuple ): '''simple docstring''' lowercase_ = TOKEN HfFolder.save_token(UpperCamelCase__ ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="""test-model-flax""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" ) except HTTPError: pass def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowercase_ = FlaxBertModel(UpperCamelCase__ ) model.push_to_hub("""test-model-flax""" , use_auth_token=self._token ) lowercase_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) lowercase_ = flatten_dict(unfreeze(model.params ) ) lowercase_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowercase_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(UpperCamelCase__ , 1e-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id="""test-model-flax""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCamelCase__ , repo_id="""test-model-flax""" , push_to_hub=UpperCamelCase__ , use_auth_token=self._token ) lowercase_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) 
# Below: parameter-by-parameter comparison of the pushed vs. reloaded model;
# the organization-repo variant of the same round-trip test; a module-level
# helper comparing two models' flattened parameters; then sharded-checkpoint
# and subfolder loading tests against hf-internal-testing fixtures.
 lowercase_ = flatten_dict(unfreeze(model.params ) ) lowercase_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowercase_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(UpperCamelCase__ , 1e-3 , msg=F'''{key} not identical''' ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowercase_ = FlaxBertModel(UpperCamelCase__ ) model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token ) lowercase_ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) lowercase_ = flatten_dict(unfreeze(model.params ) ) lowercase_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowercase_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(UpperCamelCase__ , 1e-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( UpperCamelCase__ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=UpperCamelCase__ , use_auth_token=self._token ) lowercase_ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) lowercase_ = flatten_dict(unfreeze(model.params ) ) lowercase_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowercase_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(UpperCamelCase__ , 1e-3 , msg=F'''{key} not identical''' ) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = True lowercase_ = flatten_dict(modela.params ) lowercase_ = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: lowercase_ = False return models_are_equal @require_flax class UpperCamelCase__ ( 
unittest.TestCase ): def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowercase_ = FlaxBertModel(UpperCamelCase__ ) lowercase_ = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) with self.assertRaises(UpperCamelCase__ ): lowercase_ = FlaxBertModel.from_pretrained(UpperCamelCase__ ) lowercase_ = FlaxBertModel.from_pretrained(UpperCamelCase__ , subfolder=UpperCamelCase__ ) self.assertTrue(check_models_equal(UpperCamelCase__ , UpperCamelCase__ ) ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowercase_ = FlaxBertModel(UpperCamelCase__ ) lowercase_ = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , max_shard_size="""10KB""" ) with self.assertRaises(UpperCamelCase__ ): lowercase_ = FlaxBertModel.from_pretrained(UpperCamelCase__ ) lowercase_ = FlaxBertModel.from_pretrained(UpperCamelCase__ , subfolder=UpperCamelCase__ ) self.assertTrue(check_models_equal(UpperCamelCase__ , UpperCamelCase__ ) ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = """bert""" lowercase_ = """hf-internal-testing/tiny-random-bert-subfolder""" with self.assertRaises(UpperCamelCase__ ): lowercase_ = FlaxBertModel.from_pretrained(UpperCamelCase__ ) lowercase_ = FlaxBertModel.from_pretrained(UpperCamelCase__ , subfolder=UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ = """bert""" lowercase_ = """hf-internal-testing/tiny-random-bert-sharded-subfolder""" with self.assertRaises(UpperCamelCase__ ): lowercase_ = FlaxBertModel.from_pretrained(UpperCamelCase__ ) lowercase_ = FlaxBertModel.from_pretrained(UpperCamelCase__ , 
subfolder=UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ )
650
# Test suite for the DeiT vision transformer: a model-tester helper class that
# builds small configs/inputs and checks output shapes for DeiTModel,
# DeiTForMaskedImageModeling and DeiTForImageClassification; a common
# ModelTesterMixin-based test class (training, gradient checkpointing,
# problem-type loss checks); and slow integration tests against the pretrained
# facebook/deit-base-distilled-patch16-224 checkpoint (logit values, fp16).
# NOTE(review): identifiers appear mechanically renamed (lowercase_ reused for
# distinct locals); code text left byte-identical, only comments added.
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCamelCase__ : def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ): '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = image_size lowercase_ = patch_size lowercase_ = num_channels lowercase_ = is_training lowercase_ = use_labels lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads 
# Remaining tester config attributes, then the config/inputs builders and the
# per-head shape checks (model, masked-image-modeling, image classification).
 lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = scope lowercase_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowercase_ = (image_size // patch_size) ** 2 lowercase_ = num_patches + 2 def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ): '''simple docstring''' lowercase_ = DeiTModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ ) 
# Shape checks continue (including greyscale single-channel variants), then
# the common ModelTesterMixin/PipelineTesterMixin test class begins.
 model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase_ = 1 lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ): '''simple docstring''' lowercase_ = self.type_sequence_label_size lowercase_ = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase_ = 1 lowercase_ = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = ( { 
'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : List[Any] = False def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = DeiTModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCamelCase__ ) lowercase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ = [*signature.parameters.keys()] lowercase_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ): '''simple docstring''' lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' if not self.model_tester.is_training: return lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCamelCase__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowercase_ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowercase_ = model(**UpperCamelCase__ ).loss loss.backward() def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowercase_ = False lowercase_ = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowercase_ = model_class(UpperCamelCase__ ) 
 model.gradient_checkpointing_enable() model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowercase_ = model(**UpperCamelCase__ ).loss loss.backward() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCamelCase__ ), *get_values(UpperCamelCase__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ): lowercase_ = problem_type["""title"""] lowercase_ = problem_type["""num_labels"""] lowercase_ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if problem_type["num_labels"] > 1: lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list: lowercase_ = model(**UpperCamelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def UpperCAmelCase__ ( self : int ): '''simple docstring''' for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def UpperCAmelCase_ ( ): lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( UpperCamelCase__ ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowercase_ = model(**UpperCamelCase__ ) # verify the logits lowercase_ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" , 
torch_dtype=torch.floataa , device_map="""auto""" ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ) lowercase_ = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase_ = model(UpperCamelCase__ )
650
1
# Package init for the DeepFloyd-IF diffusers pipelines: declares the pipeline
# output dataclass (generated images plus optional NSFW/watermark detection
# flags) and re-exports the IF pipelines when both transformers and torch are
# available, falling back to dummy objects otherwise.
# NOTE(review): the `.timesteps` import list contains duplicated names
# (e.g. `smartaa_timesteps` twice) — these look like mechanical renaming
# collisions of originally distinct timestep schedules; left byte-identical.
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Union[List[PIL.Image.Image], np.ndarray] __SCREAMING_SNAKE_CASE : Optional[List[bool]] __SCREAMING_SNAKE_CASE : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
650
# Package init for the ControlNet diffusers pipelines: re-exports the PyTorch
# ControlNet pipelines when both transformers and torch are available (dummy
# objects otherwise), and the Flax pipeline when flax is also installed.
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
650
1
# Nightly GPU integration test for OnnxStableDiffusionInpaintPipelineLegacy:
# loads an image/mask pair and a reference output from the Hub, runs the ONNX
# inpainting pipeline on CUDA with a fixed RandomState(0) seed, and asserts
# the produced 512x512 image matches the reference within 1e-2 max abs error.
# NOTE(review): locals appear mechanically renamed (`lowercase_` shadows
# `options`/`pipe`/`output` reads below); code text left byte-identical.
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = ort.SessionOptions() lowercase_ = False return options def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) lowercase_ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) lowercase_ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default lowercase_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = """A red cat sitting on a park bench""" lowercase_ = np.random.RandomState(0 ) lowercase_ = pipe( prompt=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=UpperCamelCase__ , output_type="""np""" , ) lowercase_ = output.images[0] assert 
image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-2
650
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast __SCREAMING_SNAKE_CASE : List[Any] = True __SCREAMING_SNAKE_CASE : int = True def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = """<pad>""" lowercase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 1_008 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) lowercase_ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + 
tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase__ , f.name ) lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ ) lowercase_ = pickle.dumps(UpperCamelCase__ ) pickle.loads(UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase_ = self.get_tokenizer() lowercase_ = self.get_rust_tokenizer() lowercase_ = """I was born in 92000, and this is falsé.""" lowercase_ = 
tokenizer.tokenize(UpperCamelCase__ ) lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = self.get_rust_tokenizer() lowercase_ = tokenizer.encode(UpperCamelCase__ ) lowercase_ = rust_tokenizer.encode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = """Hello World!""" lowercase_ = [2, 31_227, 4_447, 35] self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = { """input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
650
1
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class UpperCamelCase__:
    """Reusable test harness for `PretrainedConfig` subclasses.

    ``parent`` is the ``unittest.TestCase`` running the checks, ``config_class``
    the config class under test, and ``**kwargs`` the constructor arguments
    (stored as ``inputs_dict``). Call :meth:`run_common_tests` to execute every
    check.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """Check that the standard config attributes exist and are settable."""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """Round-trip through `to_json_string` and verify every input key survives."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """Round-trip through `to_json_file` / `from_json_file`."""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """Round-trip through `save_pretrained` / `from_pretrained`."""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """Same round-trip, but loading via the `subfolder=` argument."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_directory = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_directory)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """`num_labels` must resize both `id2label` and `label2id`, at init and via the setter."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        """A non-composite config must be constructible with no arguments."""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Every kwarg in `config_common_kwargs` must actually be applied by __init__."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                # torch_dtype can only be checked when torch is installed.
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        """Run every check in this harness."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
650
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


class UpperCamelCase__(PreTrainedTokenizerFast):
    """Fast (HuggingFace *tokenizers*-backed) T5 tokenizer.

    Sentinel tokens ``<extra_id_0> ... <extra_id_{extra_ids-1}>`` are appended
    to the vocabulary unless the caller provides them explicitly through
    ``additional_special_tokens``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    # Token ids prepended to every encoded sequence (T5 uses none).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created when the sentencepiece model is present.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Keep the historical (incorrect) 512 max length for legacy checkpoints, with a deprecation warning."""
        if pretrained_model_name_or_path in UpperCamelCase__.max_model_input_sizes:
            deprecated_max_model_length = UpperCamelCase__.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into `save_directory`; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Append EOS to each sequence: ``X </s>`` or ``A </s> B </s>``."""
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token types: return a zero mask covering both sequences plus EOS."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        """Return the subset of additional special tokens matching ``<extra_id_N>``."""
        # NOTE: the original expression was `bool(re.search(...)) is not None`,
        # which is always True and so kept every special token; the predicate
        # below keeps only genuine sentinel tokens.
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Return the vocabulary ids of the sentinel tokens."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
650
1
from collections.abc import Sequence


def UpperCAmelCase_(nums: Sequence = None):
    """Return the maximum sum over all contiguous, non-empty subarrays of ``nums``.

    Implements Kadane's algorithm: O(n) time, O(1) extra space. The previous
    recurrence ``max(num, ans + num, ans)`` conflated the best-so-far value
    with the current running sum, e.g. it returned 5 for [2, -5, 3] whose true
    answer is 3.

    Args:
        nums: non-empty sequence of numbers.

    Returns:
        The largest subarray sum (negative when every element is negative).

    Raises:
        ValueError: if ``nums`` is ``None`` or empty.

    >>> UpperCAmelCase_([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    >>> UpperCAmelCase_([2, -5, 3])
    3
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    best = current = nums[0]
    for value in nums[1:]:
        # Either extend the running subarray or start a new one at `value`.
        current = max(value, current + value)
        best = max(best, current)
    return best


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(UpperCAmelCase_(array))
650
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline __SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} __SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} __SCREAMING_SNAKE_CASE : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __SCREAMING_SNAKE_CASE : Any = frozenset([] ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , ) lowercase_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) lowercase_ = DDIMInverseScheduler( beta_start=0.00_085 
, beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , ) torch.manual_seed(0 ) lowercase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowercase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowercase_ = CLIPTextModel(UpperCamelCase__ ) lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase_ = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ): '''simple docstring''' lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) if str(UpperCamelCase__ ).startswith("""mps""" ): lowercase_ = torch.manual_seed(UpperCamelCase__ ) else: lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ): '''simple 
docstring''' lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ) if str(UpperCamelCase__ ).startswith("""mps""" ): lowercase_ = torch.manual_seed(UpperCamelCase__ ) else: lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ): '''simple docstring''' lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ) if str(UpperCamelCase__ ).startswith("""mps""" ): lowercase_ = torch.manual_seed(UpperCamelCase__ ) else: lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : int ): '''simple docstring''' if not hasattr(self.pipeline_class , """_optional_components""" ): return lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) # set all optional components to None and update pipeline config accordingly for optional_component in 
pipe._optional_components: setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = pipe(**UpperCamelCase__ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCamelCase__ ) lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ ) pipe_loaded.to(UpperCamelCase__ ) pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ ) for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = pipe_loaded(**UpperCamelCase__ )[0] lowercase_ = np.abs(output - output_loaded ).max() self.assertLess(UpperCamelCase__ , 1e-4 ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = """cpu""" lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ ) lowercase_ = pipe.generate_mask(**UpperCamelCase__ ) lowercase_ = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) lowercase_ = np.array([0] * 9 ) lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = """cpu""" lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ ) lowercase_ = pipe.invert(**UpperCamelCase__ ).images lowercase_ = image[0, -1, -3:, -3:] 
self.assertEqual(image.shape , (2, 32, 32, 3) ) lowercase_ = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1e-3 ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = """cpu""" lowercase_ = self.get_dummy_components() lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""} lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ ) lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ ) lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ ) lowercase_ = pipe.invert(**UpperCamelCase__ ).images lowercase_ = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowercase_ = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCAmelCase__ ( cls : Dict ): '''simple docstring''' lowercase_ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) ) lowercase_ = raw_image def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = torch.manual_seed(0 ) lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained( 
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa ) lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config ) lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = """a bowl of fruit""" lowercase_ = """a bowl of pears""" lowercase_ = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , ) lowercase_ = pipe.invert( prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents lowercase_ = pipe( prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] lowercase_ = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = torch.manual_seed(0 ) lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa ) lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = """a bowl of fruit""" lowercase_ = """a bowl of pears""" lowercase_ = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , ) lowercase_ = pipe.invert( prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , 
generator=UpperCamelCase__ , num_inference_steps=25 , ).latents lowercase_ = pipe( prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] lowercase_ = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
650
1
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


# NOTE(review): identifiers in this file were machine-mangled (classes/methods
# renamed to UpperCamelCase__/UpperCAmelCase__, locals to lowercase_), so many
# names referenced below (parent, input_ids, config, TFPegasusModelTester, ...)
# are undefined as written — verify against the upstream transformers test file.
@require_tf
class UpperCamelCase__ :
    """Model tester: builds a tiny random Pegasus config plus matching inputs
    for the TF Pegasus unit tests."""

    # Class-level test configuration (config class, config overrides, hidden act).
    __SCREAMING_SNAKE_CASE : Optional[int] = PegasusConfig
    __SCREAMING_SNAKE_CASE : List[str] = {}
    __SCREAMING_SNAKE_CASE : Tuple = 'gelu'

    def __init__( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int]=13 , UpperCamelCase__ : Optional[Any]=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : int=99 , UpperCamelCase__ : int=32 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=40 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=0 , ):
        """Store the hyper-parameters of the tiny test model.

        NOTE(review): parameter names were all mangled to the same identifier
        (a SyntaxError as written); the right-hand-side names below preserve
        the original parameter order: parent, batch_size, seq_length, ...
        """
        lowercase_ = parent
        lowercase_ = batch_size
        lowercase_ = seq_length
        lowercase_ = is_training
        lowercase_ = use_labels
        lowercase_ = vocab_size
        lowercase_ = hidden_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = intermediate_size
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = max_position_embeddings
        lowercase_ = eos_token_id
        lowercase_ = pad_token_id
        lowercase_ = bos_token_id

    def UpperCAmelCase__ ( self : Any ):
        """Return ``(config, inputs_dict)`` for a tiny Pegasus model; every
        input sequence is forced to end with the EOS token."""
        lowercase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        lowercase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        # Append EOS to each row so generation-style tests terminate cleanly.
        lowercase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
        lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        lowercase_ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        return config, inputs_dict

    def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
        """Check that incremental decoding with cached ``past_key_values``
        produces the same logits as a full forward pass."""
        lowercase_ = TFPegasusModel(config=UpperCamelCase__ ).get_decoder()
        lowercase_ = inputs_dict["""input_ids"""]

        # Restrict to a single example to keep the test fast.
        lowercase_ = input_ids[:1, :]
        lowercase_ = inputs_dict["""attention_mask"""][:1, :]
        lowercase_ = inputs_dict["""head_mask"""]
        lowercase_ = 1

        # first forward pass
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
        lowercase_ , lowercase_ = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        lowercase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
        lowercase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        # Full-sequence pass vs. cached pass over only the appended tokens.
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        lowercase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        lowercase_ = output_from_no_past[:, -3:, random_slice_idx]
        lowercase_ = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )


def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ):
    """Fill in default attention / head masks for a Pegasus forward call and
    return the complete inputs dict.

    Masks default to: non-pad positions for ``attention_mask`` /
    ``decoder_attention_mask`` (first decoder position always attended), and
    all-ones head masks shaped (layers, heads).
    """
    if attention_mask is None:
        lowercase_ = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        lowercase_ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        lowercase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        lowercase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        lowercase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common model/pipeline test-suite wiring for TF Pegasus."""

    # Model classes under test and the pipeline-task -> class mapping.
    __SCREAMING_SNAKE_CASE : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    __SCREAMING_SNAKE_CASE : Dict = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    __SCREAMING_SNAKE_CASE : Dict = (
        {
            'conversational': TFPegasusForConditionalGeneration,
            'feature-extraction': TFPegasusModel,
            'summarization': TFPegasusForConditionalGeneration,
            'text2text-generation': TFPegasusForConditionalGeneration,
            'translation': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE : Dict = True
    __SCREAMING_SNAKE_CASE : Dict = False
    __SCREAMING_SNAKE_CASE : Dict = False

    def UpperCAmelCase__ ( self : Dict ):
        """Create the model tester and config tester used by the common tests."""
        lowercase_ = TFPegasusModelTester(self )
        lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Run the shared PretrainedConfig sanity checks."""
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Exercise cached decoding against full decoding on large inputs."""
        lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )


@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test: google/pegasus-xsum must reproduce known
    summaries for two reference articles."""

    __SCREAMING_SNAKE_CASE : List[Any] = [
        ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
        ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
    ]
    __SCREAMING_SNAKE_CASE : Optional[int] = [
        'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
        ' reduce the risk of wildfires.',
        'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    __SCREAMING_SNAKE_CASE : Union[str, Any] = 'google/pegasus-xsum'

    @cached_property
    def UpperCAmelCase__ ( self : str ):
        """Tokenizer for the checkpoint, loaded once per test class."""
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def UpperCAmelCase__ ( self : Any ):
        """The pretrained seq2seq model, loaded once per test class."""
        lowercase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def UpperCAmelCase__ ( self : Dict , **UpperCamelCase__ : Dict ):
        """Assert that generated summaries match the expected reference text."""
        lowercase_ = self.translate_src_text(**UpperCamelCase__ )
        assert self.expected_text == generated_words

    def UpperCAmelCase__ ( self : str , **UpperCamelCase__ : List[Any] ):
        """Tokenize the source articles, generate with beam search (2 beams),
        and decode the results."""
        lowercase_ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""" )
        lowercase_ = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
        lowercase_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__ )
        return generated_words

    @slow
    def UpperCAmelCase__ ( self : int ):
        """End-to-end batch generation against the reference summaries."""
        self._assert_generated_batch_equal_expected()
650
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


a = logging.get_logger(__name__)


# NOTE(review): identifiers were machine-mangled (locals renamed to lowercase_,
# parameters to UpperCamelCase__), so names like do_rescale / images used below
# are undefined as written — verify against the upstream transformers file.
class UpperCamelCase__ ( __magic_name__ ):
    """Image processor that rescales pixel values and symmetrically pads
    images so height and width become multiples of ``pad_size``."""

    # Only key produced in the output BatchFeature.
    __SCREAMING_SNAKE_CASE : str = ['pixel_values']

    def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
        """Store defaults: do_rescale, rescale_factor (1/255 maps uint8 to
        [0, 1]), do_pad and pad_size (window multiple, default 8)."""
        super().__init__(**UpperCamelCase__ )
        lowercase_ = do_rescale
        lowercase_ = rescale_factor
        lowercase_ = do_pad
        lowercase_ = pad_size

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
        """Multiply pixel values by ``scale`` (thin wrapper over
        ``image_transforms.rescale``)."""
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
        """Symmetrically pad the bottom/right edges up to the next multiple of
        ``size``.

        Note: the arithmetic always adds between 1 and ``size`` pixels per
        dimension — a side that is already a multiple of ``size`` still gets a
        full extra block of padding.
        """
        lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
        lowercase_ = (old_height // size + 1) * size - old_height
        lowercase_ = (old_width // size + 1) * size - old_width

        return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
        """Validate inputs, then apply (optionally) rescale and pad to every
        image and return a ``BatchFeature`` with ``pixel_values``.

        Per-call arguments override the instance defaults when not ``None``.
        """
        lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
        lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase_ = do_pad if do_pad is not None else self.do_pad
        lowercase_ = pad_size if pad_size is not None else self.pad_size

        lowercase_ = make_list_of_images(UpperCamelCase__ )

        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        # All transformations expect numpy arrays.
        lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]

        if do_rescale:
            lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]

        if do_pad:
            lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]

        lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]

        lowercase_ = {"""pixel_values""": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
650
1
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


a = logging.get_logger(__name__)

# Map of reference Longformer checkpoints to their hosted config files.
a = {
    'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
    'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
    'allenai/longformer-large-4096-finetuned-triviaqa': (
        'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
    ),
    'allenai/longformer-base-4096-extra.pos.embd.only': (
        'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
    ),
    'allenai/longformer-large-4096-extra.pos.embd.only': (
        'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
    ),
}


# NOTE(review): identifiers were machine-mangled; right-hand names used below
# (attention_window, vocab_size, ...) are undefined as written — verify against
# the upstream transformers configuration_longformer.py.
class UpperCamelCase__ ( __magic_name__ ):
    """Configuration for a Longformer model (BERT-like hyper-parameters plus
    a per-layer local ``attention_window``)."""

    __SCREAMING_SNAKE_CASE : Any = 'longformer'

    def __init__( self : Union[str, Any] , UpperCamelCase__ : Union[List[int], int] = 512 , UpperCamelCase__ : int = 2 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 0 , UpperCamelCase__ : int = 2 , UpperCamelCase__ : int = 30_522 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 3_072 , UpperCamelCase__ : str = "gelu" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 2 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : float = 1e-12 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Optional[int] , ):
        """Store all hyper-parameters; the pad token id is forwarded to the
        ``PretrainedConfig`` base class."""
        super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )

        lowercase_ = attention_window
        lowercase_ = sep_token_id
        lowercase_ = bos_token_id
        lowercase_ = eos_token_id
        lowercase_ = vocab_size
        lowercase_ = hidden_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = hidden_act
        lowercase_ = intermediate_size
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = max_position_embeddings
        lowercase_ = type_vocab_size
        lowercase_ = initializer_range
        lowercase_ = layer_norm_eps
        lowercase_ = onnx_export


class UpperCamelCase__ ( __magic_name__ ):
    """ONNX export configuration for Longformer: declares the dynamic input
    and output axes and a Longformer-specific dummy-input generator."""

    def __init__( self : Optional[Any] , UpperCamelCase__ : "PretrainedConfig" , UpperCamelCase__ : str = "default" , UpperCamelCase__ : "List[PatchingSpec]" = None ):
        super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Mark the wrapped config as being exported to ONNX.
        lowercase_ = True

    @property
    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Dynamic-axis spec for input_ids / attention_mask /
        global_attention_mask (extra 'choice' axis for multiple-choice)."""
        if self.task == "multiple-choice":
            lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            lowercase_ = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""global_attention_mask""", dynamic_axis),
            ] )

    @property
    def UpperCAmelCase__ ( self : Dict ):
        """Outputs from the base config, with the default-task pooled output
        restricted to a dynamic batch axis only."""
        lowercase_ = super().outputs
        if self.task == "default":
            lowercase_ = {0: """batch"""}
        return outputs

    @property
    def UpperCAmelCase__ ( self : List[Any] ):
        """Numerical tolerance used when validating the exported model."""
        return 1e-4

    @property
    def UpperCAmelCase__ ( self : Dict ):
        """Minimum ONNX opset: at least 14 (tril/triu support)."""
        return max(super().default_onnx_opset , 14 )

    def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : "PreTrainedTokenizerBase" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
        """Generate dummy inputs and add a ``global_attention_mask`` with
        every second token marked global."""
        lowercase_ = super().generate_dummy_inputs(
            preprocessor=UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
        import torch

        # for some reason, replacing this code by
        # inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        lowercase_ = torch.zeros_like(inputs["""input_ids"""] )
        # make every second token global
        lowercase_ = 1

        return inputs
650
def UpperCAmelCase_ ( UpperCAmelCase__ ): if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise TypeError("""Input value must be an 'int' type""" ) lowercase_ = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
650
1
from __future__ import annotations def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = 0.00 lowercase_ = 0 for resistor in resistors: if resistor <= 0: lowercase_ = F'''Resistor at index {index} has a negative or zero value!''' raise ValueError(UpperCAmelCase__ ) first_sum += 1 / float(UpperCAmelCase__ ) index += 1 return 1 / first_sum def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = 0.00 lowercase_ = 0 for resistor in resistors: sum_r += resistor if resistor < 0: lowercase_ = F'''Resistor at index {index} has a negative value!''' raise ValueError(UpperCAmelCase__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
650
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


# NOTE(review): identifiers were machine-mangled (attributes renamed to
# lowercase_, parameters to UpperCamelCase__), so attribute names referenced in
# the forward pass (token_embedder, position_encoding, encoders, ...) are never
# assigned as written — verify against the upstream diffusers notes encoder.
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ):
    """T5-style encoder stack over a token sequence: embedding + learned
    position encoding, a ModuleList of TaBlock layers, final layer norm and
    dropout. Returns the encoded sequence plus the input mask."""

    @register_to_config
    def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ):
        """Build the embedding tables, the per-layer TaBlock stack configured
        through a TaConfig (encoder-only: is_decoder / is_encoder_decoder are
        False), and the output norm/dropout."""
        super().__init__()

        # Token and (learned) position embedding tables.
        lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
        lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
        # Position table is frozen: requires_grad turned off.
        lowercase_ = False
        lowercase_ = nn.Dropout(p=UpperCamelCase__ )

        lowercase_ = TaConfig(
            vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , )
        lowercase_ = nn.ModuleList()
        for lyr_num in range(UpperCamelCase__ ):
            lowercase_ = TaBlock(UpperCamelCase__ )
            self.encoders.append(UpperCamelCase__ )

        lowercase_ = TaLayerNorm(UpperCamelCase__ )
        lowercase_ = nn.Dropout(p=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
        """Encode ``encoder_input_tokens`` (token ids) under the given mask.

        Returns a tuple ``(encoded, encoder_inputs_mask)`` where ``encoded``
        has post-norm dropout applied.
        """
        lowercase_ = self.token_embedder(UpperCamelCase__ )

        # Add learned position encodings for positions 0..seq_len-1.
        lowercase_ = encoder_input_tokens.shape[1]
        lowercase_ = torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device )
        x += self.position_encoding(UpperCamelCase__ )

        lowercase_ = self.dropout_pre(UpperCamelCase__ )

        # inverted the attention mask
        lowercase_ = encoder_input_tokens.size()
        lowercase_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ )

        # Run the sequence through every TaBlock; each returns a tuple whose
        # first element is the hidden states.
        for lyr in self.encoders:
            lowercase_ = lyr(UpperCamelCase__ , UpperCamelCase__ )[0]
        lowercase_ = self.layer_norm(UpperCamelCase__ )

        return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
650
1
from collections.abc import Iterable from typing import Generic, TypeVar a = TypeVar('_T') class UpperCamelCase__ ( Generic[_T] ): def __init__( self : int , UpperCamelCase__ : Iterable[_T] | None = None ): '''simple docstring''' lowercase_ = list(iterable or [] ) lowercase_ = [] def __len__( self : Any ): '''simple docstring''' return len(self._stacka ) + len(self._stacka ) def __repr__( self : Dict ): '''simple docstring''' return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})''' def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : _T ): '''simple docstring''' self._stacka.append(UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ = self._stacka.pop lowercase_ = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("""Queue is empty""" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
650
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar a = TypeVar('T') class UpperCamelCase__ ( Generic[T] ): __SCREAMING_SNAKE_CASE : deque[T] # Cache store of keys __SCREAMING_SNAKE_CASE : set[T] # References of the keys in cache __SCREAMING_SNAKE_CASE : int = 10 # Maximum capacity of cache def __init__( self : str , UpperCamelCase__ : int ): '''simple docstring''' lowercase_ = deque() lowercase_ = set() if not n: lowercase_ = sys.maxsize elif n < 0: raise ValueError("""n should be an integer greater than 0.""" ) else: lowercase_ = n def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : T ): '''simple docstring''' if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: lowercase_ = self.dq_store.pop() self.key_reference.remove(UpperCamelCase__ ) else: self.dq_store.remove(UpperCamelCase__ ) self.dq_store.appendleft(UpperCamelCase__ ) self.key_reference.add(UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' for k in self.dq_store: print(UpperCamelCase__ ) def __repr__( self : Optional[Any] ): '''simple docstring''' return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}''' if __name__ == "__main__": import doctest doctest.testmod() a = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
650
1
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'): a = { 'linear': PIL.Image.Resampling.BILINEAR, 'bilinear': PIL.Image.Resampling.BILINEAR, 'bicubic': PIL.Image.Resampling.BICUBIC, 'lanczos': PIL.Image.Resampling.LANCZOS, 'nearest': PIL.Image.Resampling.NEAREST, } else: a = { 'linear': PIL.Image.LINEAR, 'bilinear': PIL.Image.BILINEAR, 'bicubic': PIL.Image.BICUBIC, 'lanczos': PIL.Image.LANCZOS, 'nearest': PIL.Image.NEAREST, } def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowercase_ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowercase_ = numpy_to_pil(UpperCAmelCase__ ) return images def UpperCAmelCase_ ( UpperCAmelCase__ ): if images.ndim == 3: lowercase_ = images[None, ...] lowercase_ = (images * 2_5_5).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowercase_ = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images] else: lowercase_ = [Image.fromarray(UpperCAmelCase__ ) for image in images] return pil_images
650
def UpperCAmelCase_ ( UpperCAmelCase__ ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
650
1
a = 8.31_44_62 # Unit - J mol-1 K-1 def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
650
def UpperCAmelCase_ ( UpperCAmelCase__=2_8_1_2_3 ): lowercase_ = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i lowercase_ = set() lowercase_ = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(UpperCAmelCase__ ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
650
1
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    a = False


a = logging.get_logger(__name__)
# HF Hub repo providing the default header font.
a = 'ybelkada/fonts'


# NOTE(review): identifiers in this file were machine-mangled — the four
# helper functions below all share one name (shadowing each other) and locals
# were renamed to lowercase_, so several referenced names
# (_check_torch_version, torch_extract_patches, render_text, render_header,
# patches, wrapper, ...) are undefined as written. Verify against the upstream
# transformers image_processing_pix2struct.py.
def UpperCAmelCase_ ( ):
    """Raise ImportError when torch is installed but older than 1.11 (unfold
    semantics required by the patch extraction below)."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
            """Pix2StructImageProcessor. Please upgrade torch.""" )


def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
    """Extract non-overlapping (patch_height x patch_width) patches from a CHW
    image tensor; returns shape [1, rows, columns, C * ph * pw]."""
    requires_backends(UpperCAmelCase__ , ["""torch"""] )
    _check_torch_version()

    lowercase_ = image_tensor.unsqueeze(0 )
    # unfold with stride == kernel size yields disjoint patches.
    lowercase_ = torch.nn.functional.unfold(UpperCAmelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    lowercase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase__ , UpperCAmelCase__ , -1 )
    lowercase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )


def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = 3_6 , UpperCAmelCase__ = "black" , UpperCAmelCase__ = "white" , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , ):
    """Render wrapped text onto a fresh RGB image.

    Original parameters (mangled above): text, text_size=36, text_color,
    background_color, four padding values, font_bytes, font_path. The font
    falls back to Arial.TTF downloaded from the hub font repo.
    """
    requires_backends(UpperCAmelCase__ , """vision""" )
    # Add new lines so that each line is no more than 80 characters.
    lowercase_ = textwrap.TextWrapper(width=8_0 )
    lowercase_ = wrapper.wrap(text=UpperCAmelCase__ )
    lowercase_ = """\n""".join(UpperCAmelCase__ )

    if font_bytes is not None and font_path is None:
        lowercase_ = io.BytesIO(UpperCAmelCase__ )
    elif font_path is not None:
        lowercase_ = font_path
    else:
        lowercase_ = hf_hub_download(UpperCAmelCase__ , """Arial.TTF""" )
    lowercase_ = ImageFont.truetype(UpperCAmelCase__ , encoding="""UTF-8""" , size=UpperCAmelCase__ )

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    lowercase_ = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , UpperCAmelCase__ ) )
    lowercase_ , lowercase_ , lowercase_ , lowercase_ = temp_draw.textbbox((0, 0) , UpperCAmelCase__ , UpperCAmelCase__ )

    # Create the actual image with a bit of padding around the text.
    lowercase_ = text_width + left_padding + right_padding
    lowercase_ = text_height + top_padding + bottom_padding
    lowercase_ = Image.new("""RGB""" , (image_width, image_height) , UpperCAmelCase__ )
    lowercase_ = ImageDraw.Draw(UpperCAmelCase__ )
    draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase__ , fill=UpperCAmelCase__ , font=UpperCAmelCase__ )
    return image


def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ):
    """Render ``header`` text above ``image``: both are resized to a common
    width and stacked vertically on a white canvas; extra kwargs are forwarded
    to the text renderer."""
    requires_backends(UpperCAmelCase__ , """vision""" )

    # Convert to PIL image if necessary
    lowercase_ = to_pil_image(UpperCAmelCase__ )

    lowercase_ = render_text(UpperCAmelCase__ , **UpperCAmelCase__ )
    lowercase_ = max(header_image.width , image.width )

    lowercase_ = int(image.height * (new_width / image.width) )
    lowercase_ = int(header_image.height * (new_width / header_image.width) )

    lowercase_ = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )

    # Convert back to the original framework if necessary
    lowercase_ = to_numpy_array(UpperCAmelCase__ )

    if infer_channel_dimension_format(UpperCAmelCase__ ) == ChannelDimension.LAST:
        lowercase_ = to_channel_dimension_format(UpperCAmelCase__ , ChannelDimension.LAST )

    return new_image


class UpperCamelCase__ ( __magic_name__ ):
    """Pix2Struct image processor: resizes each image to fit at most
    ``max_patches`` patches, flattens the patches, and prefixes each with its
    1-based row/column id. VQA mode renders a question header onto the image
    first."""

    # Only keys produced in the output BatchFeature (plus attention_mask).
    __SCREAMING_SNAKE_CASE : Tuple = ['flattened_patches']

    def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 2_048 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Optional[int] , ):
        """Store defaults: do_convert_rgb, do_normalize, patch_size
        (16x16 by default), max_patches and the VQA flag."""
        super().__init__(**UpperCamelCase__ )
        lowercase_ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
        lowercase_ = do_normalize
        lowercase_ = do_convert_rgb
        lowercase_ = max_patches
        lowercase_ = is_vqa

    def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : dict , **UpperCamelCase__ : Optional[int] ):
        """Resize the image so ~max_patches patches fit, extract flattened
        patches, prepend (row_id, col_id) per patch, and zero-pad the result
        to exactly ``max_patches`` rows. Returns a numpy array of shape
        [max_patches, 2 + ph * pw * C]."""
        requires_backends(self.extract_flattened_patches , """torch""" )
        _check_torch_version()

        # convert to torch
        lowercase_ = to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.FIRST )
        lowercase_ = torch.from_numpy(UpperCamelCase__ )

        lowercase_ , lowercase_ = patch_size["""height"""], patch_size["""width"""]
        lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )

        # maximize scale s.t.
        lowercase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        lowercase_ = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase__ ) , 1 )
        lowercase_ = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase__ ) , 1 )
        lowercase_ = max(num_feasible_rows * patch_height , 1 )
        lowercase_ = max(num_feasible_cols * patch_width , 1 )

        lowercase_ = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=UpperCamelCase__ , antialias=UpperCamelCase__ , ).squeeze(0 )

        # [1, rows, columns, patch_height * patch_width * image_channels]
        lowercase_ = torch_extract_patches(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

        lowercase_ = patches.shape
        lowercase_ = patches_shape[1]
        lowercase_ = patches_shape[2]
        lowercase_ = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        lowercase_ = patches.reshape([rows * columns, depth] )

        # [rows * columns, 1]
        lowercase_ = torch.arange(UpperCamelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase__ ).reshape([rows * columns, 1] )
        lowercase_ = torch.arange(UpperCamelCase__ ).reshape([1, columns] ).repeat(UpperCamelCase__ , 1 ).reshape([rows * columns, 1] )

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        lowercase_ = row_ids.to(torch.floataa )
        lowercase_ = col_ids.to(torch.floataa )

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        lowercase_ = torch.cat([row_ids, col_ids, patches] , -1 )

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        lowercase_ = torch.nn.functional.pad(UpperCamelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float()

        lowercase_ = to_numpy_array(UpperCamelCase__ )

        return result

    def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict ):
        """Standardize the image with its own per-image mean and std; the std
        is floored at 1/sqrt(pixel_count) to avoid amplifying flat images."""
        if image.dtype == np.uinta:
            lowercase_ = image.astype(np.floataa )

        # take mean across the whole `image`
        lowercase_ = np.mean(UpperCamelCase__ )
        lowercase_ = np.std(UpperCamelCase__ )
        lowercase_ = max(UpperCamelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )

        return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : str , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ):
        """Full preprocessing pipeline: validate, (optionally) convert to RGB,
        render the VQA header when ``is_vqa``, normalize, extract flattened
        patches, and build the BatchFeature with an attention mask marking
        non-padding patch rows.

        Raises:
            ValueError: on invalid image types, an explicit ``data_format``
                kwarg (outputs have a fixed layout), or a missing VQA header
                text.
        """
        lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
        lowercase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        lowercase_ = patch_size if patch_size is not None else self.patch_size
        lowercase_ = max_patches if max_patches is not None else self.max_patches
        lowercase_ = self.is_vqa

        if kwargs.get("""data_format""" , UpperCamelCase__ ) is not None:
            raise ValueError("""data_format is not an accepted input as the outputs are """ )

        lowercase_ = make_list_of_images(UpperCamelCase__ )

        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            lowercase_ = [convert_to_rgb(UpperCamelCase__ ) for image in images]

        # All transformations expect numpy arrays.
        lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("""A header text must be provided for VQA models.""" )

            lowercase_ = kwargs.pop("""font_bytes""" , UpperCamelCase__ )
            lowercase_ = kwargs.pop("""font_path""" , UpperCamelCase__ )
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                lowercase_ = [header_text] * len(UpperCamelCase__ )
            lowercase_ = [
                render_header(UpperCamelCase__ , header_text[i] , font_bytes=UpperCamelCase__ , font_path=UpperCamelCase__ )
                for i, image in enumerate(UpperCamelCase__ )
            ]

        if do_normalize:
            lowercase_ = [self.normalize(image=UpperCamelCase__ ) for image in images]

        # convert to torch tensor and permute
        lowercase_ = [
            self.extract_flattened_patches(image=UpperCamelCase__ , max_patches=UpperCamelCase__ , patch_size=UpperCamelCase__ )
            for image in images
        ]

        # create attention mask in numpy
        lowercase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]

        lowercase_ = BatchFeature(
            data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=UpperCamelCase__ )

        return encoded_outputs
650
# NOTE(review): machine-mangled unit-test module for the OpenLlama model.
# Assignment targets were rewritten to `lowercase_` (later reads of e.g.
# `model`, `result`, `input_ids` are unbound) and parameter names are
# duplicated (SyntaxError). Tokens preserved; comments describe the intent.
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class UpperCamelCase__ :
    """Model tester: builds small OpenLlama configs/inputs and runs shape
    checks for the model classes (an `OpenLlamaModelTester` equivalent)."""

    def __init__( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=None , ):
        """Record the tiny-model hyper-parameters used by every check below.
        NOTE(review): duplicated parameter names — SyntaxError as written."""
        lowercase_ = parent
        lowercase_ = batch_size
        lowercase_ = seq_length
        lowercase_ = is_training
        lowercase_ = use_input_mask
        lowercase_ = use_token_type_ids
        lowercase_ = use_labels
        lowercase_ = vocab_size
        lowercase_ = hidden_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = intermediate_size
        lowercase_ = hidden_act
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = max_position_embeddings
        lowercase_ = type_vocab_size
        lowercase_ = type_sequence_label_size
        lowercase_ = initializer_range
        lowercase_ = num_labels
        lowercase_ = num_choices
        lowercase_ = scope

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """prepare_config_and_inputs: random ids/masks/labels plus a config."""
        lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ = None
        if self.use_input_mask:
            lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ = None
        if self.use_token_type_ids:
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase_ = None
        lowercase_ = None
        lowercase_ = None
        if self.use_labels:
            lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
        lowercase_ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase__ ( self : Dict ):
        """get_config: an OpenLlamaConfig built from the stored sizes."""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=UpperCamelCase__ ,
            initializer_range=self.initializer_range ,
            use_stable_embedding=UpperCamelCase__ ,
        )

    def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
        """create_and_check_model: forward with/without mask, check shape."""
        lowercase_ = OpenLlamaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        lowercase_ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ):
        """create_and_check_model_as_decoder: cross-attention forward passes."""
        lowercase_ = True
        lowercase_ = OpenLlamaModel(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(
            UpperCamelCase__ ,
            attention_mask=UpperCamelCase__ ,
            encoder_hidden_states=UpperCamelCase__ ,
            encoder_attention_mask=UpperCamelCase__ ,
        )
        lowercase_ = model(
            UpperCamelCase__ ,
            attention_mask=UpperCamelCase__ ,
            encoder_hidden_states=UpperCamelCase__ ,
        )
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ):
        """create_and_check_for_causal_lm: logits shape is (B, S, vocab)."""
        lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ):
        """decoder_model_past_large_inputs: outputs with and without a KV
        cache must agree on a random slice of the final hidden states."""
        lowercase_ = True
        lowercase_ = True
        lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # first forward pass
        lowercase_ = model(
            UpperCamelCase__ ,
            attention_mask=UpperCamelCase__ ,
            encoder_hidden_states=UpperCamelCase__ ,
            encoder_attention_mask=UpperCamelCase__ ,
            use_cache=UpperCamelCase__ ,
        )
        lowercase_ = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
        lowercase_ = model(
            UpperCamelCase__ ,
            attention_mask=UpperCamelCase__ ,
            encoder_hidden_states=UpperCamelCase__ ,
            encoder_attention_mask=UpperCamelCase__ ,
            output_hidden_states=UpperCamelCase__ ,
        )["""hidden_states"""][0]
        lowercase_ = model(
            UpperCamelCase__ ,
            attention_mask=UpperCamelCase__ ,
            encoder_hidden_states=UpperCamelCase__ ,
            encoder_attention_mask=UpperCamelCase__ ,
            past_key_values=UpperCamelCase__ ,
            output_hidden_states=UpperCamelCase__ ,
        )["""hidden_states"""][0]
        # select random slice
        lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase_ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """prepare_config_and_inputs_for_common: (config, inputs_dict) pair."""
        lowercase_ = self.prepare_config_and_inputs()
        (
            ( lowercase_ ) ,
            ( lowercase_ ) ,
            ( lowercase_ ) ,
            ( lowercase_ ) ,
            ( lowercase_ ) ,
            ( lowercase_ ) ,
            ( lowercase_ ) ,
        ) = config_and_inputs
        lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ,
# (continuation of the @require_torch test-class header opened on the
#  previous line — the mixin bases were mangled to `__magic_name__`)
unittest.TestCase ):
    # all_model_classes / all_generative_model_classes / pipeline mapping —
    # every class attribute name was mangled to the same identifier.
    __SCREAMING_SNAKE_CASE : Union[str, Any] = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    __SCREAMING_SNAKE_CASE : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    __SCREAMING_SNAKE_CASE : List[Any] = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE : Tuple = False
    __SCREAMING_SNAKE_CASE : Optional[int] = False

    def UpperCAmelCase__ ( self : List[Any] ):
        """setUp: build the model tester and a ConfigTester (hidden_size=37)."""
        lowercase_ = OpenLlamaModelTester(self )
        lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """test_config: run the common config checks."""
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self : str ):
        """test_model: basic forward-shape check."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Dict ):
        """test_model_various_embeddings: re-run per position-embedding type.
        NOTE(review): `type` shadows the builtin; mangled loop target."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase_ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def UpperCAmelCase__ ( self : List[Any] ):
        """sequence-classification (regression-style labels): logits shape."""
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ = 3
        lowercase_ = input_dict["""input_ids"""]
        # mask out pad token id 1
        lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """single_label_classification: same check with that problem_type."""
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ = 3
        lowercase_ = """single_label_classification"""
        lowercase_ = input_dict["""input_ids"""]
        lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def UpperCAmelCase__ ( self : Dict ):
        """multi_label_classification: float multi-hot labels variant."""
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ = 3
        lowercase_ = """multi_label_classification"""
        lowercase_ = input_dict["""input_ids"""]
        lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowercase_ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size
        ).to(torch.float )
        lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def UpperCAmelCase__ ( self : Dict ):
        """Intentionally skipped (see the skip reason above)."""
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
        """RoPE scaling test: compare a baseline model against a scaled one
        on short and long inputs for linear/dynamic scaling strategies."""
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ = ids_tensor([1, 10] , config.vocab_size )
        lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase_ = OpenLlamaModel(UpperCamelCase__ )
        original_model.to(UpperCamelCase__ )
        original_model.eval()
        lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
        lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase_ = {"""type""": scaling_type, """factor""": 10.0}
        lowercase_ = OpenLlamaModel(UpperCamelCase__ )
        scaled_model.to(UpperCamelCase__ )
        scaled_model.eval()
        lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
        lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
650
1
# NOTE(review): machine-mangled LayoutLMv3 processor module. Assignment
# targets were rewritten to `lowercase_` (later reads such as `features`,
# `encoded_inputs`, `image_processor` are unbound) and several `def` headers
# repeat a parameter name (SyntaxError). Tokens preserved as-is.
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class UpperCamelCase__ ( __magic_name__ ):
    """Processor that couples a LayoutLMv3 image processor (optionally doing
    OCR) with a LayoutLMv3 tokenizer into a single callable."""

    # ProcessorMixin wiring: attribute names and component classes.
    __SCREAMING_SNAKE_CASE : Optional[int] = ['image_processor', 'tokenizer']
    __SCREAMING_SNAKE_CASE : str = 'LayoutLMv3ImageProcessor'
    __SCREAMING_SNAKE_CASE : Optional[Any] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__( self : str , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Tuple=None , **UpperCamelCase__ : Optional[int] ):
        """Accept (image_processor, tokenizer); honor the deprecated
        `feature_extractor` kwarg as a fallback for the image processor."""
        lowercase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" ,
                UpperCamelCase__ ,
            )
            lowercase_ = kwargs.pop("""feature_extractor""" )
        lowercase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(UpperCamelCase__ , UpperCamelCase__ )

    def __call__( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCamelCase__ : Union[List[List[int]], List[List[List[int]]]] = None , UpperCamelCase__ : Optional[Union[List[int], List[List[int]]]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Optional[Any] , ):
        """Run the image processor (possibly with OCR), then the tokenizer on
        the text/OCR words + boxes, and attach pixel values to the encoding.
        Rejects user-supplied boxes/word_labels when apply_ocr is on."""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."""
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True."""
            )
        # first, apply the image processor
        lowercase_ = self.image_processor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                lowercase_ = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowercase_ = features["""words"""]
        lowercase_ = self.tokenizer(
            text=text if text is not None else features["""words"""] ,
            text_pair=text_pair if text_pair is not None else None ,
            boxes=boxes if boxes is not None else features["""boxes"""] ,
            word_labels=UpperCamelCase__ ,
            add_special_tokens=UpperCamelCase__ ,
            padding=UpperCamelCase__ ,
            truncation=UpperCamelCase__ ,
            max_length=UpperCamelCase__ ,
            stride=UpperCamelCase__ ,
            pad_to_multiple_of=UpperCamelCase__ ,
            return_token_type_ids=UpperCamelCase__ ,
            return_attention_mask=UpperCamelCase__ ,
            return_overflowing_tokens=UpperCamelCase__ ,
            return_special_tokens_mask=UpperCamelCase__ ,
            return_offsets_mapping=UpperCamelCase__ ,
            return_length=UpperCamelCase__ ,
            verbose=UpperCamelCase__ ,
            return_tensors=UpperCamelCase__ ,
            **UpperCamelCase__ ,
        )
        # add pixel values
        lowercase_ = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            # Duplicate images so each overflowed encoding keeps its source image.
            lowercase_ = self.get_overflowing_images(UpperCamelCase__ , encoded_inputs["""overflow_to_sample_mapping"""] )
        lowercase_ = images
        return encoded_inputs

    def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
        """get_overflowing_images: map each overflow sample back to its image."""
        lowercase_ = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                F''' {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}'''
            )
        return images_with_overflow

    def UpperCAmelCase__ ( self : Tuple , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any] ):
        """batch_decode: forwarded to the tokenizer."""
        return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : List[str] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : str ):
        """decode: forwarded to the tokenizer."""
        return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )

    @property
    def UpperCAmelCase__ ( self : List[str] ):
        """model_input_names: keys produced by __call__."""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def UpperCAmelCase__ ( self : Tuple ):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,
            UpperCamelCase__ ,
        )
        return self.image_processor_class

    @property
    def UpperCAmelCase__ ( self : str ):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,
            UpperCamelCase__ ,
        )
        return self.image_processor
650
# NOTE(review): second (complete) copy of the mangled Pix2Struct image
# processing module. Assignment targets were rewritten to `lowercase_` and
# parameter names are duplicated (SyntaxError); module-level constants were
# all renamed to `a`, so later bindings clobber earlier ones. Tokens kept.
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    a = False  # fallback for the torch-version flag

a = logging.get_logger(__name__)  # intended: logger
a = 'ybelkada/fonts'  # intended: hub repo id with the default font


def UpperCAmelCase_ ():
    """_check_torch_version: require torch>=1.11 when torch is installed."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
            """Pix2StructImageProcessor. Please upgrade torch."""
        )


def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
    """torch_extract_patches: cut a CHW tensor into non-overlapping
    (patch_height x patch_width) patches via unfold.
    NOTE(review): duplicated parameter names — SyntaxError as written."""
    requires_backends(UpperCAmelCase__ , ["""torch"""] )
    _check_torch_version()
    lowercase_ = image_tensor.unsqueeze(0 )
    lowercase_ = torch.nn.functional.unfold(UpperCAmelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    lowercase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase__ , UpperCAmelCase__ , -1 )
    lowercase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height ,
        image_tensor.size(3 ) // patch_width ,
        image_tensor.size(1 ) * patch_height * patch_width ,
    )
    return patches.unsqueeze(0 )


def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = 3_6 , UpperCAmelCase__ = "black" , UpperCAmelCase__ = "white" , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , ):
    """render_text: word-wrap text at 80 chars, pick a font (bytes / path /
    hub download of Arial.TTF), and measure the text on a 1x1 scratch
    canvas. (Function continues past this chunk boundary.)"""
    requires_backends(UpperCAmelCase__ , """vision""" )
    # Add new lines so that each line is no more than 80 characters.
    lowercase_ = textwrap.TextWrapper(width=8_0 )
    lowercase_ = wrapper.wrap(text=UpperCAmelCase__ )
    lowercase_ = """\n""".join(UpperCAmelCase__ )
    if font_bytes is not None and font_path is None:
        lowercase_ = io.BytesIO(UpperCAmelCase__ )
    elif font_path is not None:
        lowercase_ = font_path
    else:
        lowercase_ = hf_hub_download(UpperCAmelCase__ , """Arial.TTF""" )
    lowercase_ = ImageFont.truetype(UpperCAmelCase__ , encoding="""UTF-8""" , size=UpperCAmelCase__ )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    lowercase_ = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , UpperCAmelCase__ ) )
    lowercase_ , lowercase_ , lowercase_ , lowercase_ = temp_draw.textbbox((0, 0) , UpperCAmelCase__ , UpperCAmelCase__ )
    # Create the actual image with a bit of padding around the text.
# NOTE(review): continuation of the duplicated, machine-mangled Pix2Struct
# module — assignment targets all rewritten to `lowercase_` (later reads are
# unbound) and duplicated parameter names (SyntaxError). Tokens preserved.

    # ---- tail of render_text(): size the canvas and draw the wrapped text.
    lowercase_ = text_width + left_padding + right_padding          # intended: image_width
    lowercase_ = text_height + top_padding + bottom_padding         # intended: image_height
    lowercase_ = Image.new("""RGB""" , (image_width, image_height) , UpperCAmelCase__ )
    lowercase_ = ImageDraw.Draw(UpperCAmelCase__ )
    draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase__ , fill=UpperCAmelCase__ , font=UpperCAmelCase__ )
    return image


def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ):
    """render_header: resize header and body to a common width, stack them
    vertically, return as numpy in the input's channel layout."""
    requires_backends(UpperCAmelCase__ , """vision""" )
    # Convert to PIL image if necessary
    lowercase_ = to_pil_image(UpperCAmelCase__ )
    lowercase_ = render_text(UpperCAmelCase__ , **UpperCAmelCase__ )
    lowercase_ = max(header_image.width , image.width )
    lowercase_ = int(image.height * (new_width / image.width) )
    lowercase_ = int(header_image.height * (new_width / header_image.width) )
    lowercase_ = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    lowercase_ = to_numpy_array(UpperCAmelCase__ )
    if infer_channel_dimension_format(UpperCAmelCase__ ) == ChannelDimension.LAST:
        lowercase_ = to_channel_dimension_format(UpperCAmelCase__ , ChannelDimension.LAST )
    return new_image


class UpperCamelCase__ ( __magic_name__ ):
    """Pix2Struct-style image processor: renders optional VQA headers, then
    normalizes images and extracts flattened patches up to `max_patches`."""

    __SCREAMING_SNAKE_CASE : Tuple = ['flattened_patches']

    def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 2_048 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Optional[int] , ):
        """Store patch size (default 16x16), flags, patch budget, VQA switch."""
        super().__init__(**UpperCamelCase__ )
        lowercase_ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
        lowercase_ = do_normalize
        lowercase_ = do_convert_rgb
        lowercase_ = max_patches
        lowercase_ = is_vqa

    def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : dict , **UpperCamelCase__ : Optional[int] ):
        """extract_flattened_patches: resize to fit `max_patches`, cut into
        patches, prepend 1-based row/col ids, zero-pad to `max_patches`."""
        requires_backends(self.extract_flattened_patches , """torch""" )
        _check_torch_version()
        # convert to torch
        lowercase_ = to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.FIRST )
        lowercase_ = torch.from_numpy(UpperCamelCase__ )
        lowercase_ , lowercase_ = patch_size["""height"""], patch_size["""width"""]
        lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
        # maximize scale s.t. rows * cols stays within the patch budget
        lowercase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        lowercase_ = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase__ ) , 1 )
        lowercase_ = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase__ ) , 1 )
        lowercase_ = max(num_feasible_rows * patch_height , 1 )
        lowercase_ = max(num_feasible_cols * patch_width , 1 )
        lowercase_ = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) ,
            size=(resized_height, resized_width) ,
            mode="""bilinear""" ,
            align_corners=UpperCamelCase__ ,
            antialias=UpperCamelCase__ ,
        ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        lowercase_ = torch_extract_patches(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        lowercase_ = patches.shape
        lowercase_ = patches_shape[1]
        lowercase_ = patches_shape[2]
        lowercase_ = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        lowercase_ = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        lowercase_ = torch.arange(UpperCamelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase__ ).reshape([rows * columns, 1] )
        lowercase_ = torch.arange(UpperCamelCase__ ).reshape([1, columns] ).repeat(UpperCamelCase__ , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        lowercase_ = row_ids.to(torch.floataa )
        lowercase_ = col_ids.to(torch.floataa )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        lowercase_ = torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        lowercase_ = torch.nn.functional.pad(UpperCamelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
        lowercase_ = to_numpy_array(UpperCamelCase__ )
        return result

    def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict ):
        """normalize: (x - mean) / std with the std floored at 1/sqrt(numel)."""
        if image.dtype == np.uinta:
            lowercase_ = image.astype(np.floataa )
        # take mean across the whole `image`
        lowercase_ = np.mean(UpperCamelCase__ )
        lowercase_ = np.std(UpperCamelCase__ )
        lowercase_ = max(UpperCamelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : str , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ):
        """preprocess: RGB conversion / VQA header rendering, normalization,
        patch extraction; returns BatchFeature with patches + attention mask."""
        lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
        lowercase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        lowercase_ = patch_size if patch_size is not None else self.patch_size
        lowercase_ = max_patches if max_patches is not None else self.max_patches
        lowercase_ = self.is_vqa
        if kwargs.get("""data_format""" , UpperCamelCase__ ) is not None:
            raise ValueError("""data_format is not an accepted input as the outputs are """ )
        lowercase_ = make_list_of_images(UpperCamelCase__ )
        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray."""
            )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            lowercase_ = [convert_to_rgb(UpperCamelCase__ ) for image in images]
        # All transformations expect numpy arrays.
        lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("""A header text must be provided for VQA models.""" )
            lowercase_ = kwargs.pop("""font_bytes""" , UpperCamelCase__ )
            lowercase_ = kwargs.pop("""font_path""" , UpperCamelCase__ )
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                lowercase_ = [header_text] * len(UpperCamelCase__ )
            lowercase_ = [
                render_header(UpperCamelCase__ , header_text[i] , font_bytes=UpperCamelCase__ , font_path=UpperCamelCase__ )
                for i, image in enumerate(UpperCamelCase__ )
            ]
        if do_normalize:
            lowercase_ = [self.normalize(image=UpperCamelCase__ ) for image in images]
        # convert to torch tensor and permute
        lowercase_ = [
            self.extract_flattened_patches(image=UpperCamelCase__ , max_patches=UpperCamelCase__ , patch_size=UpperCamelCase__ )
            for image in images
        ]
        # create attention mask in numpy
        lowercase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
        lowercase_ = BatchFeature(
            data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=UpperCamelCase__
        )
        return encoded_outputs
650
1
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual-question-answering pipeline: answers a free-text question about an image.

    Only the PyTorch backend is supported (``postprocess`` raises for any
    other framework).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict to models registered for visual question answering.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Split user kwargs into per-stage parameter dicts.

        ``padding``/``truncation`` feed the tokenizer in ``preprocess``;
        ``top_k`` controls how many answers ``postprocess`` returns.
        """
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """Answer ``question`` about ``image``.

        Accepts either ``(image, question)`` positional arguments or a
        single dict / list of dicts with ``"image"`` and ``"question"``
        keys, mirroring the batched calling convention of ``Pipeline``.
        """
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the caller already built dict-shaped (possibly batched) inputs.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """Tokenize the question and featurize the image into model inputs."""
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the preprocessed inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Turn logits into the ``top_k`` highest-scoring answer labels.

        Uses a sigmoid (multi-label formulation) rather than softmax, as in
        the upstream VQA head; ``top_k`` is clamped to the label count.
        """
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
650
import cva
import numpy as np


class HarrisCorner:
    """Harris corner detector over a grayscale image.

    Attributes:
        k: Harris free parameter; only the two textbook values 0.04 and
           0.06 are accepted.
        window_size: side length of the square window used to accumulate
           the structure tensor around each pixel.
    """

    def __init__(self, k: float, window_size: int):
        """Validate and store the detector parameters.

        Raises:
            ValueError: if ``k`` is not one of the accepted values.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Detect Harris corners in the image stored at ``img_path``.

        Returns:
            ``(color_img, corner_list)`` — an RGB copy of the input with
            detected corners painted blue, and a list of ``[x, y, r]``
            entries for every pixel whose corner response ``r`` exceeded
            the fixed threshold 0.5.
        """
        img = cva.imread(img_path, 0)  # 0 -> load as grayscale
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Harris response; use the configured self.k rather than a
                # hard-coded 0.04, so the constructor argument takes effect.
                r = det - self.k * (trace**2)
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    # detect() returns (annotated image, corner list); keep the image for output.
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
650
1
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig

if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    """Composite configuration pairing a vision encoder config with a text decoder config.

    Both sub-configurations must be supplied (as dicts) under the
    ``encoder`` and ``decoder`` keyword arguments.
    """

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Rehydrate the concrete config classes from their model_type tags.
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "VisionEncoderDecoderConfig":
        """Build a composite config from two instantiated sub-configs.

        Forces the decoder into cross-attending decoder mode.
        """
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    # ONNX opset support for the image-input graph requires at least torch 1.11.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the encoder's pixel input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the decoder's token and cross-attention inputs."""
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Produce dummy decoder inputs, adding zeroed encoder hidden states.

        The encoder hidden size is read from ``self._config.encoder_hidden_size``,
        which ``get_decoder_config`` below is responsible for setting.
        """
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        # The composite model is exported as two separate graphs; see the
        # encoder/decoder configs returned below.
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        """Return the ONNX config for the encoder sub-graph."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        """Return the ONNX config for the decoder sub-graph.

        Records the encoder hidden size on the decoder config so dummy
        cross-attention states can be shaped correctly.
        """
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
650
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    # Pillow >= 9.1 moved the resampling filters into an enum.
    a = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    a = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch (NCHW, values in [-1, 1]) to PIL images.

    Rescales to [0, 1], moves to CPU/NHWC, then delegates to
    ``numpy_to_pil``.
    """
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(images)


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC, values in [0, 1]) to PIL images.

    A single HWC image is promoted to a batch of one; single-channel
    images come back in grayscale mode "L".
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # Special case for grayscale (single-channel) images.
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images


# Backwards-compatible alias: in the previous version both helpers shared one
# name and the second definition shadowed the first, so the public name
# resolved to the numpy converter.
UpperCAmelCase_ = numpy_to_pil
650
1
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo a = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' a = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. 
According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' a = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... 
\'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , ) def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[List[List[str]]] , UpperCamelCase__ : List[List[str]] , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=UpperCamelCase__ , hypotheses=UpperCamelCase__ , min_len=UpperCamelCase__ , max_len=UpperCamelCase__ ) }
650
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    """Unit tests for ``UnCLIPScheduler`` built on the shared scheduler harness."""

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler kwargs, overridden by ``kwargs``."""
        config = {
            "num_train_timesteps": 1_000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        # prev_timestep must precede time_step, so skip invalid pairs.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        """Spot-check variance values in fixed_small_log mode."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_549_625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9_994_987)) < 1e-5

    def test_variance_learned_range(self):
        """Spot-check log-variance values in learned_range mode."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0_010_011 < 1e-5

    def test_full_loop(self):
        """Deterministic end-to-end denoising over all training timesteps."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2_682_495) < 1e-2
        assert abs(result_mean.item() - 0.3_284_743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        """Deterministic denoising over a reduced 25-step schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2_044_983) < 1e-2
        assert abs(result_mean.item() - 0.3_362_038) < 1e-3

    def test_trained_betas(self):
        # Not applicable to UnCLIPScheduler; overridden to disable the base test.
        pass

    def test_add_noise_device(self):
        # Not applicable to UnCLIPScheduler; overridden to disable the base test.
        pass
650
1
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Self-Attention-Guidance Stable Diffusion pipeline."""

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for a fast test pipeline."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs for the fast pipeline."""
        # mps cannot seed a device-local Generator, so fall back to the global RNG.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against released checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
650
# Masked-image-modeling (SimMIM-style) pre-training script built on the
# Hugging Face Trainer. Parses CLI/JSON args into (model, data, training)
# dataclasses, loads a dataset, builds a random patch-mask generator and
# torchvision augmentation pipeline, then trains/evaluates an
# AutoModelForMaskedImageModeling.
#
# NOTE(review): this file appears machine-mangled — every local is bound to
# the literal name `lowercase_` (so earlier values are overwritten) and later
# reads use names that are never assigned (`parser`, `training_args`, `ds`,
# `config`, `MODEL_CONFIG_CLASSES`, ...). As written it will fail at runtime;
# the comments below document the evident intent of each step.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

# NOTE(review): the module-level constant names were all collapsed to `a` by
# the mangling; each assignment below clobbers the previous one.
a = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')

a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
# NOTE(review): `MODEL_CONFIG_CLASSES` is never defined in this file — it was
# presumably the original name of the list built on the previous line.
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class UpperCamelCase__ :
    """Data-side arguments: dataset selection, train/val split and masking geometry."""

    # Name of the dataset to load via `datasets.load_dataset` (default: cifar10).
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the training data.'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the validation data.'} )
    __SCREAMING_SNAKE_CASE : Optional[float] = field(
        default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
    __SCREAMING_SNAKE_CASE : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
    __SCREAMING_SNAKE_CASE : float = field(
        default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def UpperCAmelCase__ ( self : Tuple ):
        """Collect train/validation folder paths into a `data_files` mapping (presumably the original `__post_init__`)."""
        # NOTE(review): all four assignments target the same mangled name
        # `lowercase_`; the original evidently built a dict keyed by
        # "train"/"val" and stored it on self.
        lowercase_ = {}
        if self.train_dir is not None:
            lowercase_ = self.train_dir
        if self.validation_dir is not None:
            lowercase_ = self.validation_dir
        lowercase_ = data_files if data_files else None


@dataclass
class UpperCamelCase__ :
    """Model-side arguments: checkpoint/config selection and SimMIM geometry overrides."""

    __SCREAMING_SNAKE_CASE : str = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                'Don\'t set if you want to train a model from scratch.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__magic_name__ )} , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
    __SCREAMING_SNAKE_CASE : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    __SCREAMING_SNAKE_CASE : str = field(default=__magic_name__ , metadata={'help': 'Name or path of preprocessor config.'} )
    __SCREAMING_SNAKE_CASE : bool = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ , metadata={'help': 'Stride to use for the encoder.'} , )


class UpperCamelCase__ :
    """Random boolean patch-mask generator (SimMIM style).

    Masks are sampled on a coarse grid of `input_size // mask_patch_size`
    cells and then upsampled by `mask_patch_size // model_patch_size` so the
    mask aligns with the model's patch grid.
    """

    def __init__( self : Dict , UpperCamelCase__ : List[Any]=192 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : str=0.6 ):
        """Precompute mask-grid geometry; raises ValueError when the sizes do not divide evenly."""
        lowercase_ = input_size
        lowercase_ = mask_patch_size
        lowercase_ = model_patch_size
        lowercase_ = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )
        # coarse grid side, upsampling factor, total cells and cells to mask
        lowercase_ = self.input_size // self.mask_patch_size
        lowercase_ = self.mask_patch_size // self.model_patch_size
        lowercase_ = self.rand_size**2
        lowercase_ = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__( self : int ):
        """Sample a fresh random mask and return it as a flattened torch tensor."""
        # pick `mask_count` random cell indices, set them to 1, then upsample
        lowercase_ = np.random.permutation(self.token_count )[: self.mask_count]
        lowercase_ = np.zeros(self.token_count , dtype=UpperCamelCase__ )
        lowercase_ = 1
        lowercase_ = mask.reshape((self.rand_size, self.rand_size) )
        lowercase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )


def UpperCAmelCase_ ( UpperCAmelCase__ ):
    # Collate function: stack per-example pixel tensors and masks into batch
    # tensors for the Trainer.
    lowercase_ = torch.stack([example["""pixel_values"""] for example in examples] )
    lowercase_ = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def UpperCAmelCase_ ( ):
    """Entry point: parse args, prepare data/config/model, then train and evaluate."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    lowercase_ = training_args.get_process_log_level()
    logger.setLevel(UpperCAmelCase__ )
    transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    # NOTE(review): the two f-strings are concatenated with no separator, so
    # "n_gpu: N" runs directly into "distributed training" in the output.
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )

    # Detecting last checkpoint.
    lowercase_ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase_ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Initialize our dataset.
    lowercase_ = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )

    # If we don't have a validation split, split off a percentage of train as validation.
    lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
        lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split )
        lowercase_ = split["""train"""]
        lowercase_ = split["""test"""]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase_ = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        lowercase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCAmelCase__ )
    elif model_args.model_name_or_path:
        lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
    else:
        lowercase_ = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'''New config: {config}''' )

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(UpperCAmelCase__ , """decoder_type""" ):
        lowercase_ = """simmim"""

    # adapt config: CLI values win over the checkpoint's configuration
    lowercase_ = model_args.image_size if model_args.image_size is not None else config.image_size
    lowercase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    lowercase_ = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            """image_size""": model_args.image_size,
            """patch_size""": model_args.patch_size,
            """encoder_stride""": model_args.encoder_stride,
        } )

    # create image processor
    if model_args.image_processor_name:
        lowercase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ )
    elif model_args.model_name_or_path:
        lowercase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
    else:
        lowercase_ = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        lowercase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        lowercase_ = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        lowercase_ = AutoModelForMaskedImageModeling.from_config(UpperCAmelCase__ )

    if training_args.do_train:
        lowercase_ = ds["""train"""].column_names
    else:
        lowercase_ = ds["""validation"""].column_names

    # Resolve which dataset column holds the images.
    if data_args.image_column_name is not None:
        lowercase_ = data_args.image_column_name
    elif "image" in column_names:
        lowercase_ = """image"""
    elif "img" in column_names:
        lowercase_ = """img"""
    else:
        lowercase_ = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    lowercase_ = Compose(
        [
            Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )

    # create mask generator
    lowercase_ = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )

    def preprocess_images(UpperCAmelCase__ ):
        # Apply the augmentation pipeline and attach one fresh random mask per image.
        lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
        lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            lowercase_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(UpperCAmelCase__ )

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            lowercase_ = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(UpperCAmelCase__ )

    # Initialize our trainer
    lowercase_ = Trainer(
        model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )

    # Training
    if training_args.do_train:
        lowercase_ = None
        if training_args.resume_from_checkpoint is not None:
            lowercase_ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowercase_ = last_checkpoint
        lowercase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        lowercase_ = trainer.evaluate()
        trainer.log_metrics("""eval""" , UpperCAmelCase__ )
        trainer.save_metrics("""eval""" , UpperCAmelCase__ )

    # Write model card and (optionally) push to hub
    lowercase_ = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCAmelCase__ )
    else:
        trainer.create_model_card(**UpperCAmelCase__ )


if __name__ == "__main__":
    main()
650
1
# Unit tests for the OpenLlama model family: a tester helper that builds
# dummy configs/inputs and shape-checks model outputs, plus a
# @require_torch test-case class driving the common model/pipeline mixins.
#
# NOTE(review): identifiers in this file were machine-mangled — locals are
# all named `lowercase_` (each assignment overwrites the last) and later
# reads use names never assigned (`model`, `result`, `config_and_inputs`,
# `OpenLlamaModelTester`, ...). The comments document the evident intent.
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class UpperCamelCase__ :
    """Builds small OpenLlama configs and random inputs and runs shape checks for the test case below."""

    def __init__( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=None , ):
        """Store the (tiny) model hyper-parameters used to build test configs."""
        lowercase_ = parent
        lowercase_ = batch_size
        lowercase_ = seq_length
        lowercase_ = is_training
        lowercase_ = use_input_mask
        lowercase_ = use_token_type_ids
        lowercase_ = use_labels
        lowercase_ = vocab_size
        lowercase_ = hidden_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = intermediate_size
        lowercase_ = hidden_act
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = max_position_embeddings
        lowercase_ = type_vocab_size
        lowercase_ = type_sequence_label_size
        lowercase_ = initializer_range
        lowercase_ = num_labels
        lowercase_ = num_choices
        lowercase_ = scope

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Create random input ids, optional masks/labels, and a config for one test run."""
        lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ = None
        if self.use_input_mask:
            lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ = None
        if self.use_token_type_ids:
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase_ = None
        lowercase_ = None
        lowercase_ = None
        if self.use_labels:
            lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
        lowercase_ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase__ ( self : Dict ):
        """Build an OpenLlamaConfig from the stored hyper-parameters."""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )

    def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
        """Run OpenLlamaModel (with and without attention mask) and check the hidden-state shape."""
        lowercase_ = OpenLlamaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        lowercase_ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ):
        """Run the model as a decoder with encoder states/masks and check the hidden-state shape."""
        lowercase_ = True
        lowercase_ = OpenLlamaModel(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
        lowercase_ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ):
        """Run OpenLlamaForCausalLM with labels and check the logits shape."""
        lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ):
        """Check that decoding with cached past_key_values matches a full forward pass."""
        lowercase_ = True
        lowercase_ = True
        lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # first forward pass
        lowercase_ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
        lowercase_ = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
        lowercase_ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
        lowercase_ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
        # select random slice
        lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase_ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Unpack prepare_config_and_inputs into the (config, inputs_dict) form the common mixin expects."""
        lowercase_ = self.prepare_config_and_inputs()
        (
            (lowercase_) ,
            (lowercase_) ,
            (lowercase_) ,
            (lowercase_) ,
            (lowercase_) ,
            (lowercase_) ,
            (lowercase_) ,
        ) = config_and_inputs
        lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
    """OpenLlama test case wired into the common model/generation/pipeline test mixins."""

    # all_model_classes / all_generative_model_classes / pipeline mapping
    # (NOTE(review): attribute names were collapsed to __SCREAMING_SNAKE_CASE
    # by the mangling, so each assignment shadows the previous one).
    __SCREAMING_SNAKE_CASE : Union[str, Any] = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    __SCREAMING_SNAKE_CASE : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    __SCREAMING_SNAKE_CASE : List[Any] = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE : Tuple = False
    __SCREAMING_SNAKE_CASE : Optional[int] = False

    def UpperCAmelCase__ ( self : List[Any] ):
        """Set up the model tester and config tester (presumably the original setUp)."""
        lowercase_ = OpenLlamaModelTester(self )
        lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self : str ):
        """Basic model forward/shape test."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Dict ):
        """Repeat the model test for each position-embedding type."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase_ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def UpperCAmelCase__ ( self : List[Any] ):
        """Sequence classification with regression-style (default problem_type) labels."""
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ = 3
        lowercase_ = input_dict["""input_ids"""]
        lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Sequence classification with problem_type="single_label_classification"."""
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ = 3
        lowercase_ = """single_label_classification"""
        lowercase_ = input_dict["""input_ids"""]
        lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def UpperCAmelCase__ ( self : Dict ):
        """Sequence classification with problem_type="multi_label_classification" (float label matrix)."""
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ = 3
        lowercase_ = """multi_label_classification"""
        lowercase_ = input_dict["""input_ids"""]
        lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowercase_ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def UpperCAmelCase__ ( self : Dict ):
        """Intentionally skipped — see the skip reason above."""
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
        """Compare outputs of an unscaled model vs one using RoPE scaling (linear/dynamic)."""
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        # short input within max_position_embeddings, long input 1.5x beyond it
        lowercase_ = ids_tensor([1, 10] , config.vocab_size )
        lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase_ = OpenLlamaModel(UpperCamelCase__ )
        original_model.to(UpperCamelCase__ )
        original_model.eval()
        lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
        lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase_ = {"""type""": scaling_type, """factor""": 10.0}
        lowercase_ = OpenLlamaModel(UpperCamelCase__ )
        scaled_model.to(UpperCamelCase__ )
        scaled_model.eval()
        lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
        lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
650
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a = logging.get_logger(__name__) class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : List[Any] = ['pixel_values'] def __init__( self : List[str] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCamelCase__ : Dict , ): '''simple docstring''' super().__init__(**UpperCamelCase__ ) lowercase_ = size if size is not None else {"""shortest_edge""": 224} lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) lowercase_ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowercase_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" ) lowercase_ = do_resize lowercase_ = size lowercase_ = resample lowercase_ = do_center_crop lowercase_ = crop_size lowercase_ = do_rescale lowercase_ = rescale_factor lowercase_ = do_normalize lowercase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowercase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCAmelCase__ ( self : int , UpperCamelCase__ : 
np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowercase_ = int((256 / 224) * size["""shortest_edge"""] ) lowercase_ = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ ) lowercase_ = {"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( UpperCamelCase__ , size=(size_dict["""height"""], size_dict["""width"""]) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCAmelCase__ ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ): '''simple docstring''' lowercase_ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ): '''simple docstring''' return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[TensorType] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' lowercase_ = do_resize if do_resize is not None else self.do_resize lowercase_ = resample if resample is not None else self.resample lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase_ = do_rescale if do_rescale is not None else self.do_rescale lowercase_ = 
rescale_factor if rescale_factor is not None else self.rescale_factor lowercase_ = do_normalize if do_normalize is not None else self.do_normalize lowercase_ = image_mean if image_mean is not None else self.image_mean lowercase_ = image_std if image_std is not None else self.image_std lowercase_ = size if size is not None else self.size lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) lowercase_ = crop_size if crop_size is not None else self.crop_size lowercase_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" ) lowercase_ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: lowercase_ = [self.resize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images] if do_center_crop: lowercase_ = [self.center_crop(UpperCamelCase__ , UpperCamelCase__ ) for image in images] if do_rescale: lowercase_ = [self.rescale(UpperCamelCase__ , UpperCamelCase__ ) for image in images] if do_normalize: lowercase_ = [self.normalize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images] lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] lowercase_ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
650
1
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast __SCREAMING_SNAKE_CASE : List[Any] = True __SCREAMING_SNAKE_CASE : int = True def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = """<pad>""" lowercase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 1_008 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) lowercase_ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + 
tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase__ , f.name ) lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ ) lowercase_ = pickle.dumps(UpperCamelCase__ ) pickle.loads(UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase_ = self.get_tokenizer() lowercase_ = self.get_rust_tokenizer() lowercase_ = """I was born in 92000, and this is falsé.""" lowercase_ = 
tokenizer.tokenize(UpperCamelCase__ ) lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = self.get_rust_tokenizer() lowercase_ = tokenizer.encode(UpperCamelCase__ ) lowercase_ = rust_tokenizer.encode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = """Hello World!""" lowercase_ = [2, 31_227, 4_447, 35] self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = { """input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
650
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
650
1
import math def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = [True] * n lowercase_ = False lowercase_ = False lowercase_ = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): lowercase_ = i * 2 while index < n: lowercase_ = False lowercase_ = index + i lowercase_ = [2] for i in range(3 , UpperCAmelCase__ , 2 ): if is_prime[i]: primes.append(UpperCAmelCase__ ) return primes def UpperCAmelCase_ ( UpperCAmelCase__ = 9_9_9_9_6_6_6_6_3_3_3_3 ): lowercase_ = math.floor(math.sqrt(UpperCAmelCase__ ) ) + 1_0_0 lowercase_ = prime_sieve(UpperCAmelCase__ ) lowercase_ = 0 lowercase_ = 0 lowercase_ = primes[prime_index] while (last_prime**2) <= limit: lowercase_ = primes[prime_index + 1] lowercase_ = last_prime**2 lowercase_ = next_prime**2 # Get numbers divisible by lps(current) lowercase_ = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) lowercase_ = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps lowercase_ = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair lowercase_ = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
650
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() a = logging.get_logger(__name__) a = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } a = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): for attribute in key.split(""".""" ): lowercase_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) if weight_type is not None: lowercase_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape else: lowercase_ = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase_ = value elif weight_type == "weight_g": lowercase_ = value elif weight_type == "weight_v": lowercase_ = value elif weight_type == "bias": lowercase_ = value else: lowercase_ = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = [] lowercase_ = fairseq_model.state_dict() lowercase_ = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight lowercase_ = None for name, value in fairseq_dict.items(): lowercase_ = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == """group""" , ) lowercase_ = True elif name.split(""".""" )[0] == "proj": lowercase_ = fairseq_model.proj lowercase_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowercase_ = True if "*" in mapped_key: lowercase_ = name.split(UpperCAmelCase__ )[0].split(""".""" )[-2] lowercase_ = mapped_key.replace("""*""" , UpperCAmelCase__ ) if "weight_g" in name: lowercase_ = """weight_g""" elif "weight_v" in name: lowercase_ = """weight_v""" elif "bias" in name: lowercase_ = """bias""" elif "weight" in name: lowercase_ = """weight""" else: lowercase_ = None set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) continue if not is_used: unused_weights.append(UpperCAmelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) return proj_weight def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = full_name.split("""conv_layers.""" )[-1] lowercase_ = 
name.split(""".""" ) lowercase_ = int(items[0] ) lowercase_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) lowercase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCAmelCase__ ) def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ , lowercase_ = emb.weight.shape lowercase_ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ , bias=UpperCAmelCase__ ) lowercase_ = emb.weight.data return lin_layer def UpperCAmelCase_ ( UpperCAmelCase__ ): with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" ) as f: lowercase_ = f.readlines() lowercase_ = [line.split(""" """ )[0] for line in lines] lowercase_ = len(UpperCAmelCase__ ) lowercase_ = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(UpperCAmelCase__ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ): lowercase_ = WavaVecaConfig.from_pretrained(UpperCAmelCase__ ) lowercase_ = SpeechaTextaConfig.from_pretrained( UpperCAmelCase__ , vocab_size=UpperCAmelCase__ , decoder_layers=UpperCAmelCase__ , do_stable_layer_norm=UpperCAmelCase__ ) lowercase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ) lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) lowercase_ = model[0].eval() # set weights for wav2vec2 
encoder lowercase_ = WavaVecaModel(UpperCAmelCase__ ) lowercase_ = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase__ ) lowercase_ = SpeechaTextaForCausalLM(UpperCAmelCase__ ) lowercase_ , lowercase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase__ ) # set output linear layer unexpected_keys.remove("""embed_out""" ) lowercase_ = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) lowercase_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ ) lowercase_ = False # add projection layer lowercase_ = nn.Parameter(projection_layer.weight ) lowercase_ = nn.Parameter(projection_layer.bias ) lowercase_ = create_vocab_dict(UpperCAmelCase__ ) with open(os.path.join(UpperCAmelCase__ , """vocab.json""" ) , """w""" ) as fp: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase__ , """vocab.json""" ) ) tokenizer.save_pretrained(UpperCAmelCase__ ) lowercase_ = hf_wavavec.config.to_dict() lowercase_ = tokenizer.pad_token_id lowercase_ = tokenizer.bos_token_id lowercase_ = tokenizer.eos_token_id lowercase_ = """speech_to_text_2""" lowercase_ = """wav2vec2""" lowercase_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__ ) hf_wavavec.save_pretrained(UpperCAmelCase__ ) feature_extractor.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict 
of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') a = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
650
1
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UpperCamelCase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = tf.convert_to_tensor( [ [ 8.2_220_991, # 3rd highest value; idx. 0 -0.5_620_044, 5.23_229_752, 4.0_386_393, -6.8_798_378, -0.54_785_802, -3.2_012_153, 2.92_777_176, 1.88_171_953, 7.35_341_276, # 5th highest value; idx. 9 8.43_207_833, # 2nd highest value; idx. 10 -9.85_711_836, -5.96_209_236, -1.13_039_161, -7.1_115_294, -0.8_369_633, -5.3_186_408, 7.06_427_407, 0.81_369_344, -0.82_023_817, -5.9_179_796, 0.58_813_443, -6.99_778_438, 4.71_551_189, -0.18_771_637, 7.44_020_759, # 4th highest value; idx. 25 9.38_450_987, # 1st highest value; idx. 26 2.12_662_941, -9.32_562_038, 2.35_652_522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_425_518, 4.53_139_238, -5.57_510_464, -6.28_030_699, -7.19_529_503, -4.02_122_551, 1.39_337_037, -6.06_707_057, 1.59_480_517, -9.643_119, 0.03_907_799, 0.67_231_762, -8.88_206_726, 6.27_115_922, # 4th highest value; idx. 13 2.28_520_723, 4.82_767_506, 4.30_421_368, 8.8_275_313, # 2nd highest value; idx. 17 5.44_029_958, # 5th highest value; idx. 18 -4.4_735_794, 7.38_579_536, # 3rd highest value; idx. 
20 -2.91_051_663, 2.61_946_077, -2.5_674_762, -9.48_959_302, -4.02_922_645, -1.35_416_918, 9.67_702_323, # 1st highest value; idx. 27 -5.89_478_553, 1.85_370_467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) lowercase_ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above lowercase_ = tf.convert_to_tensor( [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above lowercase_ = tf_top_k_top_p_filtering(UpperCamelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) lowercase_ = output[output != -float("""inf""" )] lowercase_ = tf.cast( tf.where(tf.not_equal(UpperCamelCase__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-12 ) tf.debugging.assert_equal(UpperCamelCase__ , UpperCamelCase__ ) @require_tf class UpperCamelCase__ ( unittest.TestCase , __magic_name__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): __SCREAMING_SNAKE_CASE : int = { 'AutoModelForCausalLM': TFAutoModelForCausalLM, 'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq, 'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM, 'AutoModelForVision2Seq': TFAutoModelForVisionaSeq, 'LogitsProcessorList': TFLogitsProcessorList, 'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor, 'create_tensor_fn': tf.convert_to_tensor, 'floats_tensor': floats_tensor, 'return_tensors': 'tf', } @slow def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ = 2 lowercase_ = 2 class UpperCamelCase__ ( tf.Module ): def __init__( self : List[Any] , UpperCamelCase__ : 
List[Any] ): '''simple docstring''' super(UpperCamelCase__ , self ).__init__() lowercase_ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=UpperCamelCase__ , ) def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = self.model.generate( input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ , max_new_tokens=UpperCamelCase__ , return_dict_in_generate=UpperCamelCase__ , ) return {"sequences": outputs["sequences"]} lowercase_ = [[2, 0], [102, 103]] lowercase_ = [[1, 0], [1, 1]] lowercase_ = DummyModel(model=UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": dummy_model.serving} ) lowercase_ = tf.saved_model.load(UpperCamelCase__ ).signatures["""serving_default"""] for batch_size in range(1 , len(UpperCamelCase__ ) + 1 ): lowercase_ = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } lowercase_ = serving_func(**UpperCamelCase__ )["""sequences"""] lowercase_ = test_model.generate(**UpperCamelCase__ , max_new_tokens=UpperCamelCase__ ) tf.debugging.assert_equal(UpperCamelCase__ , UpperCamelCase__ ) @slow def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ = 1 lowercase_ = 2 class UpperCamelCase__ ( tf.Module ): def __init__( self : Tuple , UpperCamelCase__ : Any ): '''simple docstring''' super(UpperCamelCase__ , self ).__init__() lowercase_ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" 
), ) , jit_compile=UpperCamelCase__ , ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Dict ): '''simple docstring''' lowercase_ = self.model.generate( input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ , max_new_tokens=UpperCamelCase__ , return_dict_in_generate=UpperCamelCase__ , ) return {"sequences": outputs["sequences"]} lowercase_ = [[2], [102, 103]] lowercase_ = [[1], [1, 1]] lowercase_ = DummyModel(model=UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": dummy_model.serving} ) lowercase_ = tf.saved_model.load(UpperCamelCase__ ).signatures["""serving_default"""] for input_row in range(len(UpperCamelCase__ ) ): lowercase_ = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } lowercase_ = serving_func(**UpperCamelCase__ )["""sequences"""] lowercase_ = test_model.generate(**UpperCamelCase__ , max_new_tokens=UpperCamelCase__ ) tf.debugging.assert_equal(UpperCamelCase__ , UpperCamelCase__ ) @slow @require_tensorflow_text def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=UpperCamelCase__ ) class UpperCamelCase__ ( tf.keras.layers.Layer ): def __init__( self : int ): '''simple docstring''' super().__init__() lowercase_ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCamelCase__ , """spiece.model""" ) , """rb""" ).read() ) lowercase_ = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[int] ): '''simple docstring''' lowercase_ = 
self.tokenizer.tokenize(UpperCamelCase__ ) lowercase_ , lowercase_ = text.pad_model_inputs( UpperCamelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) lowercase_ = self.model.generate(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) return self.tokenizer.detokenize(UpperCamelCase__ ) lowercase_ = CompleteSentenceTransformer() lowercase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" ) lowercase_ = complete_model(UpperCamelCase__ ) lowercase_ = tf.keras.Model(UpperCamelCase__ , UpperCamelCase__ ) keras_model.save(UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 10, """temperature""": 0.7, } lowercase_ = 14 lowercase_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ = """Hello, my dog is cute and""" lowercase_ = tokenizer(UpperCamelCase__ , return_tensors="""tf""" ) lowercase_ = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) lowercase_ = model.generate(**UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) lowercase_ = [638, 198] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) lowercase_ = model.generate(**UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) lowercase_ = """Hugging Face is a technology company based in New York and Paris.""" lowercase_ = bart_tokenizer(UpperCamelCase__ , return_tensors="""tf""" ).input_ids lowercase_ = 
TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) lowercase_ = bart_model.generate(UpperCamelCase__ ).numpy() class UpperCamelCase__ ( __magic_name__ ): def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=None , **UpperCamelCase__ : Dict ): '''simple docstring''' return super().call(UpperCamelCase__ , **UpperCamelCase__ ) lowercase_ = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) lowercase_ = bart_model.generate(UpperCamelCase__ , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(UpperCamelCase__ , UpperCamelCase__ ) ) class UpperCamelCase__ ( bart_model.model.encoder.__class__ ): def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[Any] , **UpperCamelCase__ : int ): '''simple docstring''' return super().call(UpperCamelCase__ , **UpperCamelCase__ ) lowercase_ = FakeEncoder(bart_model.config , bart_model.model.shared ) lowercase_ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) lowercase_ = bart_model.generate(UpperCamelCase__ ).numpy() with self.assertRaises(UpperCamelCase__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCamelCase__ , foo="""bar""" )
650
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class UpperCamelCase__(PretrainedConfig):
    """Configuration class for ESM models (``model_type == "esm"``).

    Holds the encoder hyper-parameters and, when ``is_folding_model=True``, a
    nested :class:`EsmFoldConfig` plus the output vocabulary list used by
    ESMFold.

    NOTE(review): the original obfuscated source was unimportable (all
    ``__init__`` parameters shared one name — a SyntaxError — and it referenced
    ``EsmFoldConfig``/``logger``/``get_default_vocab_list`` without defining
    them). Parameter names below are reconstructed from the attribute
    assignments that survived obfuscation (``self.vocab_size = vocab_size``
    etc.); the outer class name is kept as-is for external callers.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        # pad/mask token ids are handled (and stored) by PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model

        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                # Allow a plain dict (e.g. from a serialized config) in place of the dataclass.
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None

        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested EsmFoldConfig when present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    """Folding-head configuration attached to folding-model ESM configs."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Coerce a missing/dict-valued trunk into a TrunkConfig instance.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested TrunkConfig."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    """Configuration of the ESMFold trunk (folding blocks)."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # FIX(review): the original tested `x % x != 0` (always false, a dead
        # check). The intended divisor is the head width, matching the
        # `// self.sequence_head_width` computation below.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested StructureModuleConfig."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    """Configuration of the ESMFold structure module (IPA head)."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize to a plain dict."""
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple of strings."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
650
1
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCAmelCase_ ( UpperCAmelCase__ ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class UpperCamelCase__ ( nn.Module ): def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int ): '''simple docstring''' super().__init__() lowercase_ = module lowercase_ = nn.Sequential( nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__ ) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__ ) , ) lowercase_ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ): '''simple docstring''' return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) + self.adapter(UpperCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class UpperCamelCase__ ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module __SCREAMING_SNAKE_CASE : Union[str, Any] = 'bigscience/bloom-1b7' # Constant values 
__SCREAMING_SNAKE_CASE : Optional[int] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4 __SCREAMING_SNAKE_CASE : Dict = 'Hello my name is' __SCREAMING_SNAKE_CASE : Optional[Any] = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) __SCREAMING_SNAKE_CASE : str = 10 def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ = AutoTokenizer.from_pretrained(self.model_name ) class UpperCamelCase__ ( __magic_name__ ): def UpperCAmelCase__ ( self : int ): '''simple docstring''' super().setUp() # Models and tokenizer lowercase_ = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) lowercase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = self.model_abit.config self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config""" ) ) lowercase_ = config.to_dict() lowercase_ = config.to_diff_dict() lowercase_ = config.to_json_string() def UpperCAmelCase__ ( self : Any ): '''simple docstring''' from bitsandbytes.nn import Paramsabit lowercase_ = self.model_fpaa.get_memory_footprint() lowercase_ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) lowercase_ = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in 
self.model_abit.named_modules(): if isinstance(UpperCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.tokenizer(self.input_text , return_tensors="""pt""" ) lowercase_ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ = BitsAndBytesConfig() lowercase_ = True lowercase_ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""" ) lowercase_ = self.tokenizer(self.input_text , return_tensors="""pt""" ) lowercase_ = model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' with self.assertRaises(UpperCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(UpperCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = BitsAndBytesConfig() with self.assertRaises(UpperCamelCase__ ): lowercase_ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' with self.assertRaises(UpperCamelCase__ ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `dtype`` 
self.model_abit.to(torch.floataa ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowercase_ = self.tokenizer(self.input_text , return_tensors="""pt""" ) lowercase_ = self.model_fpaa.to(torch.floataa ) lowercase_ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error lowercase_ = self.model_fpaa.to("""cpu""" ) # Check this does not throw an error lowercase_ = self.model_fpaa.half() # Check this does not throw an error lowercase_ = self.model_fpaa.float() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class UpperCamelCase__ ( unittest.TestCase ): @classmethod def UpperCAmelCase__ ( cls : Any ): '''simple docstring''' lowercase_ = """t5-small""" lowercase_ = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense lowercase_ = AutoTokenizer.from_pretrained(cls.model_name ) lowercase_ = """Translate in German: Hello, my dog is cute""" def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' from transformers import TaForConditionalGeneration lowercase_ = TaForConditionalGeneration._keep_in_fpaa_modules lowercase_ = None # test with `t5-small` lowercase_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , 
device_map="""auto""" ) lowercase_ = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) lowercase_ = model.generate(**UpperCamelCase__ ) # test with `flan-t5-small` lowercase_ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) lowercase_ = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) lowercase_ = model.generate(**UpperCamelCase__ ) lowercase_ = modules def UpperCAmelCase__ ( self : Any ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowercase_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) lowercase_ = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) lowercase_ = model.generate(**UpperCamelCase__ ) # test with `flan-t5-small` lowercase_ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) lowercase_ = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) lowercase_ = model.generate(**UpperCamelCase__ ) class UpperCamelCase__ ( __magic_name__ ): def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() # model_name lowercase_ = """bigscience/bloom-560m""" lowercase_ = """t5-small""" # Different types of model lowercase_ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) # Sequence classification model lowercase_ = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) # CausalLM model lowercase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , 
device_map="""auto""" ) # Seq2seq model lowercase_ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class UpperCamelCase__ ( __magic_name__ ): def UpperCAmelCase__ ( self : Any ): '''simple docstring''' super().setUp() def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass lowercase_ = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class UpperCamelCase__ ( __magic_name__ ): def UpperCAmelCase__ ( self : int ): '''simple docstring''' super().setUp() def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model lowercase_ 
= self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch lowercase_ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) class UpperCamelCase__ ( __magic_name__ ): def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = """facebook/opt-350m""" super().setUp() def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters lowercase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): lowercase_ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability lowercase_ = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(UpperCamelCase__ ) ): lowercase_ = LoRALayer(module.q_proj , rank=16 ) lowercase_ = LoRALayer(module.k_proj , rank=16 ) lowercase_ = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch lowercase_ = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowercase_ = model.forward(**UpperCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(UpperCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Optional[int] = 'gpt2-xl' __SCREAMING_SNAKE_CASE : Tuple = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
650
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    """Build the argument parser for `accelerate env`.

    Args:
        subparsers: optional argparse subparsers object; when given, `env` is
            registered as a sub-command, otherwise a standalone parser is built.

    Returns:
        The configured `argparse.ArgumentParser`.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        # Route dispatch back to env_command when used as a sub-command.
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    """Collect and print environment information for bug reports.

    Prints a copy-pastable summary (versions, platform, accelerator
    availability, RAM, and the current `accelerate` config) and returns the
    collected info as a dict.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["Accelerate configs"] = accelerate_config

    return info


def main():
    """Entry point: parse CLI args and run the env report."""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
650
1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# SHA of the commit where the current branch forked off `main`.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# Paths (relative to the repo root) of files changed since the fork point.
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

# Keep only .py files living under one of the top-level dirs given on the command line.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed directly by Makefile commands.
print(" ".join(relevant_modified_files), end="")
650
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCamelCase__ : def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ): '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = image_size lowercase_ = patch_size lowercase_ = num_channels lowercase_ = is_training lowercase_ = use_labels lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads 
lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = scope lowercase_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowercase_ = (image_size // patch_size) ** 2 lowercase_ = num_patches + 2 def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ): '''simple docstring''' lowercase_ = DeiTModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ ) 
model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase_ = 1 lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ): '''simple docstring''' lowercase_ = self.type_sequence_label_size lowercase_ = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase_ = 1 lowercase_ = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = ( { 
'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : List[Any] = False def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = DeiTModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCamelCase__ ) lowercase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ = [*signature.parameters.keys()] lowercase_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ): '''simple docstring''' lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' if not self.model_tester.is_training: return lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCamelCase__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowercase_ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowercase_ = model(**UpperCamelCase__ ).loss loss.backward() def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowercase_ = False lowercase_ = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowercase_ = model_class(UpperCamelCase__ ) 
model.gradient_checkpointing_enable() model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowercase_ = model(**UpperCamelCase__ ).loss loss.backward() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCamelCase__ ), *get_values(UpperCamelCase__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ): lowercase_ = problem_type["""title"""] lowercase_ = problem_type["""num_labels"""] lowercase_ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if problem_type["num_labels"] > 1: lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list: lowercase_ = model(**UpperCamelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def UpperCAmelCase__ ( self : int ): '''simple docstring''' for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def UpperCAmelCase_ ( ): lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( UpperCamelCase__ ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowercase_ = model(**UpperCamelCase__ ) # verify the logits lowercase_ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" , 
torch_dtype=torch.floataa , device_map="""auto""" ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ) lowercase_ = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase_ = model(UpperCamelCase__ )
650
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin a = False @skip_mps class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : int = StableDiffusionAttendAndExcitePipeline __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS __SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} ) __SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS __SCREAMING_SNAKE_CASE : Any = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def UpperCAmelCase__ ( cls : List[Any] ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(UpperCamelCase__ ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowercase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , ) lowercase_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , 
set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) lowercase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowercase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowercase_ = CLIPTextModel(UpperCamelCase__ ) lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase_ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=0 ): '''simple docstring''' if str(UpperCamelCase__ ).startswith("""mps""" ): lowercase_ = torch.manual_seed(UpperCamelCase__ ) else: lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = lowercase_ = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ = """cpu""" lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = pipe(**UpperCamelCase__ ).images lowercase_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 
64, 3) ) lowercase_ = np.array( [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1e-3 ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' super().test_save_load_local(expected_max_difference=5e-4 ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class UpperCamelCase__ ( unittest.TestCase ): @classmethod def UpperCAmelCase__ ( cls : Optional[Any] ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(UpperCamelCase__ ) @classmethod def UpperCAmelCase__ ( cls : Dict ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(UpperCamelCase__ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ = torch.manual_seed(51 ) lowercase_ = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) 
lowercase_ = """a painting of an elephant with glasses""" lowercase_ = [5, 7] lowercase_ = pipe( prompt=UpperCamelCase__ , token_indices=UpperCamelCase__ , guidance_scale=7.5 , generator=UpperCamelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] lowercase_ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
650
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)

# The PyTorch ControlNet pipelines are only exported when both `transformers`
# and `torch` are installed; otherwise the dummy placeholder objects are
# re-exported in their place so imports still resolve.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

# The Flax pipeline additionally requires `jax`/`flax` to be installed.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
650
1
from __future__ import annotations


def UpperCAmelCase_(UpperCAmelCase__: int) -> list[int]:
    """Return the prime factorization of ``UpperCAmelCase__``.

    Uses repeated trial division: each prime divisor is appended once per
    multiplicity, so the product of the returned list equals the input.

    Args:
        UpperCAmelCase__: integer >= 1 to factor.

    Returns:
        List of prime factors in non-decreasing order; empty for 1.

    >>> UpperCAmelCase_(12)
    [2, 2, 3]
    """
    n = UpperCAmelCase__
    divisor = 2
    factors: list[int] = []
    # Trial division only needs to run while divisor**2 <= n: any remaining
    # value of n larger than 1 after the loop is itself prime.
    while divisor * divisor <= n:
        if n % divisor:
            divisor += 1
        else:
            n //= divisor
            factors.append(divisor)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast __SCREAMING_SNAKE_CASE : List[Any] = True __SCREAMING_SNAKE_CASE : int = True def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = """<pad>""" lowercase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 1_008 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) lowercase_ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + 
tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase__ , f.name ) lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ ) lowercase_ = pickle.dumps(UpperCamelCase__ ) pickle.loads(UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase_ = self.get_tokenizer() lowercase_ = self.get_rust_tokenizer() lowercase_ = """I was born in 92000, and this is falsé.""" lowercase_ = 
tokenizer.tokenize(UpperCamelCase__ ) lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = self.get_rust_tokenizer() lowercase_ = tokenizer.encode(UpperCamelCase__ ) lowercase_ = rust_tokenizer.encode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = """Hello World!""" lowercase_ = [2, 31_227, 4_447, 35] self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = { """input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
650
1
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


# Checkpoint -> config-URL map.
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
}

# BUG FIX: the archive map and the logger were both bound to the same name ``a``,
# so the map was immediately clobbered and the class body's ``logger`` was undefined.
logger = logging.get_logger(__name__)


class UpperCamelCase__(PretrainedConfig):
    """Configuration for a MaskFormer model.

    Pairs a vision backbone configuration (ResNet or Swin) with a DETR-style
    transformer decoder configuration and stores the Hungarian-matcher /
    loss hyper-parameters.
    """

    # BUG FIX: these four attributes were all assigned to one obfuscated name,
    # while ``__init__`` reads ``self.backbones_supported`` / ``self.decoders_supported``.
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        """Build the composite config; unset sub-configs fall back to defaults.

        Raises:
            ValueError: if ``decoder_config`` names an unsupported decoder type.
        """
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            # Re-hydrate a plain dict into the concrete config class via the registry.
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported (warn only -- it may still work)
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror decoder geometry at the top level for downstream consumers.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate from pre-built backbone and decoder configs."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
650
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # BUG FIX: the fallback bound ``a = None``; keep ``TaTokenizer`` defined so the
    # ``slow_tokenizer_class`` attribute below never raises NameError.
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


# NOTE(review): the class body referenced ``TaTokenizerFast.max_model_input_sizes``
# while the class itself carried an obfuscated placeholder name; the class is
# renamed to the name its own body expects.
class TaTokenizerFast(PreTrainedTokenizerFast):
    """Fast T5 tokenizer backed by HuggingFace's *tokenizers* library.

    Adds ``extra_ids`` sentinel tokens (``<extra_id_0>`` .. ``<extra_id_{n-1}>``)
    and appends EOS when building model inputs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    # Tokens prepended by build_inputs_with_special_tokens (none for T5).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add the sentinel tokens unless the caller supplied their own set.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Back-compat shim for the historical (wrong) 512 max length of T5 checkpoints."""
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``.

        Raises:
            ValueError: if this tokenizer was built without a vocab file.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Append EOS: single sequence ``X </s>``; pair ``A </s> B </s>``.

        BUG FIX: the EOS-appended list was previously assigned to a throwaway
        variable and never used, so EOS was silently dropped.
        """
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token types: return all zeros of the right length.

        BUG FIX: the helper list was bound to a throwaway name while the body
        read the undefined name ``eos`` (NameError).
        """
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        """Return the distinct ``<extra_id_N>`` sentinel tokens.

        BUG FIX: the predicate was ``bool(re.search(...)) is not None``, which is
        always True; the ``is not None`` belongs inside the ``bool``.
        """
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token) is not None), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Return the ids of the sentinel tokens."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
650
1
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


# BUG FIX: all four ids were assigned to the single name ``a``, so only the last
# checkpoint survived; give each checkpoint its own constant.
PIPE_1_MODEL_ID = "CompVis/stable-diffusion-v1-1"
PIPE_2_MODEL_ID = "CompVis/stable-diffusion-v1-2"
PIPE_3_MODEL_ID = "CompVis/stable-diffusion-v1-3"
PIPE_4_MODEL_ID = "CompVis/stable-diffusion-v1-4"


class UpperCamelCase__(DiffusionPipeline):
    """Comparison pipeline: run one prompt through SD v1.1, v1.2, v1.3 and v1.4."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        # BUG FIX: was ``super()._init_()`` (single underscores) -> AttributeError.
        super().__init__()

        # v1.1 - v1.3 are downloaded as full pipelines; v1.4 is assembled from
        # the components the caller passed in.
        self.pipe1 = StableDiffusionPipeline.from_pretrained(PIPE_1_MODEL_ID)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(PIPE_2_MODEL_ID)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(PIPE_3_MODEL_ID)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        # BUG FIX: the keyword ``pipelinea=`` was repeated four times (SyntaxError:
        # keyword argument repeated); register each sub-pipeline under its own key.
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        """All non-private registered components of this pipeline."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        # NOTE(review): this pipeline registers sub-pipelines, not a top-level
        # ``unet``; assumes ``self.unet`` resolves here -- verify against DiffusionPipeline.
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable attention slicing (compute attention in one step)."""
        # BUG FIX: previously forwarded an undefined name; ``None`` disables slicing.
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.1 checkpoint."""
        return self.pipe1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.2 checkpoint."""
        return self.pipe2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.3 checkpoint."""
        return self.pipe3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.4 checkpoint."""
        return self.pipe4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run the prompt through all four checkpoints; return one image per version.

        Raises:
            ValueError: if ``height`` or ``width`` is not a multiple of 8.
        """
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        common = dict(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # BUG FIX: all four calls previously targeted one collapsed method name;
        # dispatch to the per-checkpoint helpers instead.
        res1 = self.text2img_sd1_1(**common)  # v1.1
        res2 = self.text2img_sd1_2(**common)  # v1.2
        res3 = self.text2img_sd1_3(**common)  # v1.3
        res4 = self.text2img_sd1_4(**common)  # v1.4

        # Get all result images into a single list and pass it via
        # StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
650
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# BUG FIX: the bases were the undefined placeholder ``__magic_name__`` and every
# method shared one collapsed name, so earlier test methods were shadowed and
# never collected; restore distinct, discoverable names.
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """CPU-sized unit tests for StableDiffusionDiffEditPipeline."""

    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build tiny pipeline components for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            # NOTE(review): the original boolean was lost to obfuscation; True
            # matches upstream diffusers -- confirm.
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,  # NOTE(review): restored from upstream -- confirm
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Inputs for the full edit (mask + latents already prepared)."""
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        """Inputs for generate_mask()."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        """Inputs for invert()."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        """Optional components stay None across a save/load round-trip."""
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        """generate_mask produces a binary mask of the expected shape."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        """DDIM inversion reproduces the reference latent slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        """Inversion with the DPM-Solver++ scheduler pair matches the reference."""
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    """GPU integration tests against the real SD 2.1 checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        # BUG FIX: the fixture image was assigned to a throwaway local and never
        # stored; bind it to the class so tests can read ``self.raw_image``.
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
650
1
def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Reverse every word longer than four characters in a sentence.

    Args:
        UpperCAmelCase__: the input sentence (words separated by whitespace).

    Returns:
        The sentence with each word of length > 4 reversed; shorter words and
        the single-space word separation are kept as-is.

    >>> UpperCAmelCase_('Hey wollef sroirraw')
    'Hey fellow warriors'
    """
    # Bug fixes vs. the original: the body referenced an undefined free name
    # ``sentence`` instead of the parameter, and tested the length of the whole
    # input (``len(UpperCAmelCase__)``) instead of each individual word.
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in UpperCAmelCase__.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Was ``reverse_long_words(...)`` — an undefined name after the mechanical
    # rename of the function; call the function by its actual name.
    print(UpperCAmelCase_('Hey wollef sroirraw'))
650
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


a = logging.get_logger(__name__)


# NOTE(review): this class has been mechanically renamed — every parameter in the
# signatures below is called ``UpperCamelCase__`` (duplicated names, which is not
# valid Python) and every assignment target is ``lowercase_``, so the bodies
# reference names (``do_rescale``, ``size``, ``images`` ...) that the signatures
# no longer bind. The code is kept byte-identical here; comments describe the
# apparent intent. Confirm against the pristine ``transformers`` image processor
# this was derived from before relying on any detail.
class UpperCamelCase__ ( __magic_name__ ):
    """Image processor that rescales pixel values and pads images so that both
    spatial dimensions are a multiple of a fixed size (default 8)."""

    # The only model input this processor produces.
    __SCREAMING_SNAKE_CASE : str = ['pixel_values']

    def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
        """Store the preprocessing defaults.

        Apparent parameters (in order): ``do_rescale`` (bool, default True),
        ``rescale_factor`` (default 1/255, mapping uint8 pixels into [0, 1]),
        ``do_pad`` (bool, default True), ``pad_size`` (int, default 8 — the
        multiple padded height/width must reach).
        """
        super().__init__(**UpperCamelCase__ )
        lowercase_ = do_rescale
        lowercase_ = rescale_factor
        lowercase_ = do_pad
        lowercase_ = pad_size

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
        """Rescale an image array by a scalar factor; thin wrapper around the
        project-level ``rescale`` helper (image, scale, optional data format)."""
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
        """Symmetrically pad the bottom/right edges so height and width become
        multiples of ``size``.

        Note: ``(old // size + 1) * size - old`` always pads at least one pixel —
        an image whose side is already a multiple of ``size`` gains a full extra
        ``size`` rows/columns. Presumably intentional (matches the upstream
        processor this mirrors) — TODO confirm.
        """
        lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
        lowercase_ = (old_height // size + 1) * size - old_height
        lowercase_ = (old_width // size + 1) * size - old_width
        # Pad only after (bottom) and right; ``symmetric`` mirrors edge pixels.
        return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
        """Preprocess one image or a batch.

        Pipeline: resolve per-call overrides against the stored defaults →
        normalize the input to a list of images → validate types → convert to
        numpy → optionally rescale → optionally pad → convert to the requested
        channel-dimension layout → wrap in a ``BatchFeature`` under
        ``pixel_values`` (optionally converted to the requested tensor type).
        """
        # Per-call arguments take precedence over the values stored in __init__.
        lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
        lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase_ = do_pad if do_pad is not None else self.do_pad
        lowercase_ = pad_size if pad_size is not None else self.pad_size
        lowercase_ = make_list_of_images(UpperCamelCase__ )
        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
        if do_rescale:
            lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
        if do_pad:
            lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
        # Channel-format conversion happens last, after all spatial transforms.
        lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
        lowercase_ = {"""pixel_values""": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
650
1
# NOTE(review): this whole file has been mechanically renamed (classes/functions
# became ``UpperCamelCase__``/``UpperCAmelCase_``, locals became ``lowercase_``,
# dataclass fields became ``__SCREAMING_SNAKE_CASE``) and the renaming broke it:
# dataclass fields shadow each other, several ``__init__`` signatures repeat the
# same parameter name (a SyntaxError), bodies reference names their signatures no
# longer bind, and the ``__main__`` guard calls an undefined ``main()``. The code
# is kept byte-identical below; comments describe the apparent intent — a
# SimMIM-style masked-image-modeling pretraining script. Confirm everything
# against the pristine original before relying on it.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


a = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')

# NOTE(review): both results below are bound to the same name ``a`` (the second
# line also references ``MODEL_CONFIG_CLASSES``, which no longer exists — it was
# presumably the first line's original name).
a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


# Apparent role: the data-side arguments (dataset name/config, image column,
# train/validation folders, split fraction, mask patch size/ratio, sample caps).
@dataclass
class UpperCamelCase__ :
    # NOTE(review): every field shares the name ``__SCREAMING_SNAKE_CASE`` — in a
    # dataclass later declarations overwrite earlier ones, so only the last field
    # survives. The help strings document what each field was meant to be.
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the training data.'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the validation data.'} )
    __SCREAMING_SNAKE_CASE : Optional[float] = field(
        default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
    __SCREAMING_SNAKE_CASE : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
    __SCREAMING_SNAKE_CASE : float = field(
        default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def UpperCAmelCase__ ( self : Tuple ):
        """Apparent ``__post_init__``: assemble the ``data_files`` mapping from the
        train/validation folders (assignment targets were lost in the rename)."""
        lowercase_ = {}
        if self.train_dir is not None:
            lowercase_ = self.train_dir
        if self.validation_dir is not None:
            lowercase_ = self.validation_dir
        lowercase_ = data_files if data_files else None


# Apparent role: the model-side arguments (checkpoint, config overrides, cache,
# revision, auth token, image/patch size, encoder stride).
@dataclass
class UpperCamelCase__ :
    __SCREAMING_SNAKE_CASE : str = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                'Don\'t set if you want to train a model from scratch.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ ,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__magic_name__ )} , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ ,
        metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
    __SCREAMING_SNAKE_CASE : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    __SCREAMING_SNAKE_CASE : str = field(default=__magic_name__ , metadata={'help': 'Name or path of preprocessor config.'} )
    __SCREAMING_SNAKE_CASE : bool = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ , metadata={'help': 'Stride to use for the encoder.'} , )


class UpperCamelCase__ :
    """Apparent role: SimMIM-style random mask generator. The call site below
    constructs it as ``MaskGenerator(input_size=..., mask_patch_size=...,
    model_patch_size=..., mask_ratio=...)``, which grounds the intended
    parameter names (the signature's duplicated names are rename damage)."""

    def __init__( self : Dict , UpperCamelCase__ : List[Any]=192 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : str=0.6 ):
        """Validate divisibility and precompute grid size, scale, token count and
        how many tokens to mask (``ceil(token_count * mask_ratio)``)."""
        lowercase_ = input_size
        lowercase_ = mask_patch_size
        lowercase_ = model_patch_size
        lowercase_ = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )
        lowercase_ = self.input_size // self.mask_patch_size
        lowercase_ = self.mask_patch_size // self.model_patch_size
        lowercase_ = self.rand_size**2
        lowercase_ = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__( self : int ):
        """Draw a fresh random boolean mask: pick ``mask_count`` token indices,
        mark them 1 on a ``rand_size x rand_size`` grid, upsample by ``scale`` and
        return it flattened as a torch tensor. (The ``dtype=UpperCamelCase__``
        argument references an unbound name — likely ``int`` originally.)"""
        lowercase_ = np.random.permutation(self.token_count )[: self.mask_count]
        lowercase_ = np.zeros(self.token_count , dtype=UpperCamelCase__ )
        lowercase_ = 1
        lowercase_ = mask.reshape((self.rand_size, self.rand_size) )
        lowercase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )


def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Apparent data collator: stack per-example ``pixel_values`` and ``mask``
    into batch tensors for the trainer."""
    lowercase_ = torch.stack([example["""pixel_values"""] for example in examples] )
    lowercase_ = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def UpperCAmelCase_ ( ):
    """Apparent ``main()``: parse arguments, set up logging, load the dataset,
    build config/image-processor/model, attach transforms + mask generator, then
    train/evaluate with ``Trainer`` and write the model card."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    lowercase_ = training_args.get_process_log_level()
    logger.setLevel(UpperCAmelCase__ )
    transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )

    # Detecting last checkpoint.
    lowercase_ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase_ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Initialize our dataset.
    lowercase_ = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )

    # If we don't have a validation split, split off a percentage of train as validation.
    lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
        lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split )
        lowercase_ = split["""train"""]
        lowercase_ = split["""test"""]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase_ = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        lowercase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCAmelCase__ )
    elif model_args.model_name_or_path:
        lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
    else:
        lowercase_ = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'''New config: {config}''' )

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(UpperCAmelCase__ , """decoder_type""" ):
        lowercase_ = """simmim"""

    # adapt config
    lowercase_ = model_args.image_size if model_args.image_size is not None else config.image_size
    lowercase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    lowercase_ = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            """image_size""": model_args.image_size,
            """patch_size""": model_args.patch_size,
            """encoder_stride""": model_args.encoder_stride,
        } )

    # create image processor
    if model_args.image_processor_name:
        lowercase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ )
    elif model_args.model_name_or_path:
        lowercase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
    else:
        lowercase_ = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        lowercase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        lowercase_ = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        lowercase_ = AutoModelForMaskedImageModeling.from_config(UpperCAmelCase__ )

    if training_args.do_train:
        lowercase_ = ds["""train"""].column_names
    else:
        lowercase_ = ds["""validation"""].column_names

    if data_args.image_column_name is not None:
        lowercase_ = data_args.image_column_name
    elif "image" in column_names:
        lowercase_ = """image"""
    elif "img" in column_names:
        lowercase_ = """img"""
    else:
        lowercase_ = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    lowercase_ = Compose(
        [
            Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )

    # create mask generator
    lowercase_ = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )

    def preprocess_images(UpperCAmelCase__ ):
        """Apparent batch transform: run the torchvision pipeline on each image and
        attach one freshly generated mask per example."""
        lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
        lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            lowercase_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(UpperCAmelCase__ )

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            lowercase_ = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(UpperCAmelCase__ )

    # Initialize our trainer
    lowercase_ = Trainer(
        model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )

    # Training
    if training_args.do_train:
        lowercase_ = None
        if training_args.resume_from_checkpoint is not None:
            lowercase_ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowercase_ = last_checkpoint
        lowercase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        lowercase_ = trainer.evaluate()
        trainer.log_metrics("""eval""" , UpperCAmelCase__ )
        trainer.save_metrics("""eval""" , UpperCAmelCase__ )

    # Write model card and (optionally) push to hub
    lowercase_ = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCAmelCase__ )
    else:
        trainer.create_model_card(**UpperCAmelCase__ )


if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined — the entry point above was renamed to
    # ``UpperCAmelCase_``, so this call raises NameError when run as a script.
    main()
650
def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Return the 1-indexed position of the most significant set bit of a
    non-negative integer (i.e. how many right-shifts until it reaches zero).

    >>> UpperCAmelCase_(0)
    0
    >>> UpperCAmelCase_(8)
    4

    Args:
        UpperCAmelCase__: a non-negative integer.

    Returns:
        ``0`` for ``0``; otherwise the bit position of the highest set bit,
        counting from 1.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative (the original shift loop would
            never terminate for negative numbers, since ``-1 >> 1 == -1``).
    """
    # Bug fix: the original called ``isinstance(x, x)`` — the second argument
    # must be a type, so the check itself raised for every non-type input.
    if not isinstance(UpperCAmelCase__ , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    if UpperCAmelCase__ < 0:
        raise ValueError("""Input value must be non-negative""" )
    # int.bit_length() is exactly "number of shifts until zero" — replaces the
    # manual loop, whose body referenced undefined names after the rename.
    return UpperCAmelCase__.bit_length()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
1
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase__ : def __init__( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : List[str]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Optional[int]=37 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : str=4 , UpperCamelCase__ : List[Any]=None , ): '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_input_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = 
num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = num_labels lowercase_ = num_choices lowercase_ = scope def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ = None if self.use_input_mask: lowercase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ = None lowercase_ = None lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = 
self.prepare_config_and_inputs() lowercase_ = True lowercase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ): '''simple docstring''' lowercase_ = NezhaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowercase_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , ): '''simple docstring''' lowercase_ = True lowercase_ = NezhaModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) lowercase_ = model(UpperCamelCase__ , 
attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' lowercase_ = NezhaForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ): '''simple docstring''' lowercase_ = NezhaForNextSentencePrediction(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ): '''simple docstring''' lowercase_ = NezhaForPreTraining(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , 
next_sentence_label=UpperCamelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Dict ): '''simple docstring''' lowercase_ = NezhaForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ): '''simple docstring''' lowercase_ = self.num_labels lowercase_ = NezhaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ): '''simple docstring''' lowercase_ = self.num_labels lowercase_ = NezhaForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() 
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ): '''simple docstring''' lowercase_ = self.num_choices lowercase_ = NezhaForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : List[Any] = ( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 
'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Dict = True def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=False ): '''simple docstring''' lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if model_class in get_values(UpperCamelCase__ ): lowercase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ ) lowercase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) return inputs_dict def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = NezhaModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ = None self.model_tester.create_and_check_model_as_decoder( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = NezhaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return lowercase_ = True lowercase_ = model_class(config=UpperCamelCase__ ) lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = torch.jit.trace( UpperCamelCase__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """bert.pt""" ) ) lowercase_ = torch.jit.load(os.path.join(UpperCamelCase__ , """bert.pt""" ) , map_location=UpperCamelCase__ ) loaded(inputs_dict["""input_ids"""].to(UpperCamelCase__ ) , inputs_dict["""attention_mask"""].to(UpperCamelCase__ ) ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) lowercase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0] lowercase_ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) lowercase_ = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1e-4 ) ) @slow def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) lowercase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0] lowercase_ = torch.Size((1, 6, 21_128) ) self.assertEqual(output.shape , UpperCamelCase__ ) lowercase_ = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , 
UpperCamelCase__ , atol=1e-4 ) )
650
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UpperCamelCase__(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens.

    Embeds input tokens plus learned (frozen) absolute positions, runs them
    through a stack of T5 blocks, and returns the normalized, dropout-applied
    hidden states together with the (unchanged) input mask.

    Fixes over the previous revision: the constructor assigned every module to
    a single throwaway local, so none of the attributes read by ``forward``
    (``token_embedder``, ``position_encoding``, ``dropout_pre``, ``encoders``,
    ``layer_norm``, ``dropout_post``) were ever set, and the duplicated
    parameter names were a SyntaxError.
    """

    @register_to_config
    def __init__(
        self,
        # NOTE(review): parameter names reconstructed from the obfuscated
        # signature; the type pattern (8 ints/float, str, bool=False) matches
        # the diffusers SpectrogramNotesEncoder signature — confirm upstream.
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Absolute position embeddings are fixed (not trained).
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode token ids; returns (hidden_states, encoder_inputs_mask)."""
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)

        x = self.dropout_pre(x)

        # Invert/broadcast the padding mask into an additive attention mask.
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
650
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


# Lazily-imported public API: submodule name -> names exported from it.
# fix: this structure must live in `_import_structure`, which the _LazyModule
# call below references; previously it was assigned to `a` and then clobbered
# by the modeling-object list, leaving `_import_structure` undefined.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not expose the modeling objects.
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
650
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used key tracker: most recently used key sits at index 0.

    Fixes over the previous revision: the class was declared under a name that
    did not match the `LRUCache` references inside `refer` and in the demo
    below (NameError), and eviction removed the *inserted* key from
    `key_reference` instead of the evicted one, corrupting the index.
    """

    dq_store: deque[T]  # cache store of keys, MRU at the left
    key_reference: set[T]  # membership index over dq_store (O(1) lookups)
    _MAX_CAPACITY: int = 10  # class-wide maximum capacity of the cache

    def __init__(self, n: int) -> None:
        """Create a cache holding at most ``n`` keys (0 means unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            # Capacity is a class attribute because refer() reads it that way.
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access of key ``x``, evicting the LRU key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                # fix: drop the *evicted* key, not the key being inserted
                self.key_reference.remove(last_element)
        else:
            # Already cached: pull it out so it can move to the front.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cache keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
650
1
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True iff ``input_string`` fully matches ``pattern``.

    The pattern supports two metacharacters:
      ``.``  matches any single character
      ``*``  matches zero or more of the preceding element

    Bottom-up dynamic programming, O(len(input_string) * len(pattern)).

    Fixes over the previous revision: the function name now matches the
    ``match_pattern`` call site below, and the dp table dimensions use the
    string length and the pattern length respectively (they were collapsed
    into one variable).
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp[i][j] == 1 iff the first i chars of input_string match the first
    # j chars of pattern. ("dp" stands for dynamic programming.)
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]

    # Empty string matches empty pattern.
    dp[0][0] = 1

    # A non-empty string never matches an empty pattern.
    for i in range(1, len_string):
        dp[i][0] = 0

    # The empty string matches pattern prefixes that collapse via '*'
    # (e.g. "a*b*"): each "x*" pair may contribute nothing.
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                # Direct (or wildcard) character match: inherit the diagonal.
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # "x*" matches zero occurrences of x.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # "x*" absorbs one more occurrence of x (or any char for '.').
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
650
def UpperCAmelCase_(UpperCAmelCase__):
    """Return the input with its whitespace-delimited words in reverse order.

    Runs of whitespace are collapsed to single spaces, matching str.split()
    semantics; an empty or all-whitespace input yields "".
    """
    words = UpperCAmelCase__.split()
    words.reverse()
    return " ".join(words)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging a = logging.get_logger(__name__) a = { 'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json', 'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json', 'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json', 'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json', 'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json', 'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json', } class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Union[str, Any] = 'bloom' __SCREAMING_SNAKE_CASE : Any = ['past_key_values'] __SCREAMING_SNAKE_CASE : List[Any] = { 'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head', } def __init__( self : Any , UpperCamelCase__ : Dict=250_880 , UpperCamelCase__ : str=64 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : Optional[int]=1e-5 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Any=True , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : str=False , **UpperCamelCase__ : Tuple , ): '''simple docstring''' lowercase_ = vocab_size # Backward compatibility with n_embed kwarg lowercase_ = kwargs.pop("""n_embed""" , UpperCamelCase__ ) lowercase_ = hidden_size if n_embed is None else n_embed lowercase_ = n_layer lowercase_ = n_head lowercase_ = layer_norm_epsilon lowercase_ = 
initializer_range lowercase_ = use_cache lowercase_ = pretraining_tp lowercase_ = apply_residual_connection_post_layernorm lowercase_ = hidden_dropout lowercase_ = attention_dropout lowercase_ = bos_token_id lowercase_ = eos_token_id lowercase_ = slow_but_exact super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Optional[int] = version.parse('1.12' ) def __init__( self : List[Any] , UpperCamelCase__ : PretrainedConfig , UpperCamelCase__ : str = "default" , UpperCamelCase__ : List[PatchingSpec] = None , UpperCamelCase__ : bool = False , ): '''simple docstring''' super().__init__(UpperCamelCase__ , task=UpperCamelCase__ , patching_specs=UpperCamelCase__ , use_past=UpperCamelCase__ ) if not getattr(self._config , """pad_token_id""" , UpperCamelCase__ ): # TODO: how to do that better? lowercase_ = 0 @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(UpperCamelCase__ , direction="""inputs""" , inverted_values_shape=UpperCamelCase__ ) lowercase_ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowercase_ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return self._config.n_layer @property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return self._config.n_head @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' return 1e-3 def UpperCAmelCase__ ( self : int , UpperCamelCase__ : "PreTrainedTokenizer" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , ): '''simple docstring''' lowercase_ = super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() lowercase_ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowercase_ , lowercase_ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowercase_ = seqlen + 2 lowercase_ = self._config.hidden_size // self.num_attention_heads lowercase_ = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase_ = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase_ = [ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] lowercase_ = common_inputs["""attention_mask"""] if self.use_past: lowercase_ = 
ordered_inputs["""attention_mask"""].dtype lowercase_ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return 13
650
def solution(limit: int = 28123) -> int:
    """Project Euler 23: sum of all positive integers <= ``limit`` that
    cannot be written as the sum of two abundant numbers.

    A number is abundant when the sum of its proper divisors exceeds it.
    Proper-divisor sums are built with a sieve in O(limit log limit).

    Fixes over the previous revision: the abundant-number set accumulated the
    ``limit`` argument instead of ``n`` (so it never contained the abundant
    numbers), and the function name now matches the ``solution()`` call below.
    """
    # sum_divs[k] accumulates the sum of the proper divisors of k
    # (every k starts with divisor 1; sum_divs[1] is never consulted).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i  # perfect square: count the root only once
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i  # add the divisor pair (i, k)

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)  # fix: record n itself, not the limit
        # n is countable iff no abundant pair (a, n - a) exists; only
        # abundants <= n have been added so far, which is sufficient.
        if not any((n - a) in abundants for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
650
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { 'microsoft/unispeech-large-1500h-cv': ( 'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Dict = 'unispeech' def __init__( self : str , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : List[str]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : int=3_072 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=0.02 , UpperCamelCase__ : Dict=1e-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : List[str]=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : int=128 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : List[str]=10 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : int=10 , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : str=320 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Any=100 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[Any]=256 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Dict="mean" , UpperCamelCase__ : str=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : int=256 , UpperCamelCase__ : 
List[str]=80 , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Optional[Any]=0.5 , **UpperCamelCase__ : Dict , ): '''simple docstring''' super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) lowercase_ = hidden_size lowercase_ = feat_extract_norm lowercase_ = feat_extract_activation lowercase_ = list(UpperCamelCase__ ) lowercase_ = list(UpperCamelCase__ ) lowercase_ = list(UpperCamelCase__ ) lowercase_ = conv_bias lowercase_ = num_conv_pos_embeddings lowercase_ = num_conv_pos_embedding_groups lowercase_ = len(self.conv_dim ) lowercase_ = num_hidden_layers lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = num_attention_heads lowercase_ = hidden_dropout lowercase_ = attention_dropout lowercase_ = activation_dropout lowercase_ = feat_proj_dropout lowercase_ = final_dropout lowercase_ = layerdrop lowercase_ = layer_norm_eps lowercase_ = initializer_range lowercase_ = num_ctc_classes lowercase_ = vocab_size lowercase_ = do_stable_layer_norm lowercase_ = use_weighted_layer_sum lowercase_ = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase_ = apply_spec_augment lowercase_ = mask_time_prob lowercase_ = mask_time_length lowercase_ = mask_time_min_masks lowercase_ = mask_feature_prob lowercase_ = mask_feature_length lowercase_ = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase_ = num_codevectors_per_group lowercase_ = num_codevector_groups lowercase_ = contrastive_logits_temperature lowercase_ = feat_quantizer_dropout lowercase_ = num_negatives lowercase_ = codevector_dim lowercase_ = proj_codevector_dim lowercase_ = diversity_loss_weight # ctc loss lowercase_ = ctc_loss_reduction lowercase_ = ctc_zero_infinity # pretraining loss lowercase_ = replace_prob @property def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
650
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class UpperCamelCase__ : def __init__( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=None , ): '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_input_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = num_labels lowercase_ = num_choices lowercase_ = 
scope def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ = None if self.use_input_mask: lowercase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ = None lowercase_ = None lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , ) def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ): '''simple docstring''' lowercase_ = OpenLlamaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ): '''simple docstring''' lowercase_ = True lowercase_ = OpenLlamaModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ): '''simple docstring''' lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ): '''simple docstring''' lowercase_ = True lowercase_ = True lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ ) 
model.to(UpperCamelCase__ ) model.eval() # first forward pass lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , ) lowercase_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 ) lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0] lowercase_ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0] # select random slice lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , 
unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : List[Any] = ( { 'feature-extraction': OpenLlamaModel, 'text-classification': OpenLlamaForSequenceClassification, 'text-generation': OpenLlamaForCausalLM, 'zero-shot': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Optional[int] = False def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ = OpenLlamaModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = 3 lowercase_ = input_dict["""input_ids"""] lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ ) lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, 
self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = 3 lowercase_ = """single_label_classification""" lowercase_ = input_dict["""input_ids"""] lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ ) lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = 3 lowercase_ = """multi_label_classification""" lowercase_ = input_dict["""input_ids"""] lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ ) lowercase_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = ids_tensor([1, 10] , config.vocab_size ) lowercase_ = ids_tensor([1, 
int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase_ = OpenLlamaModel(UpperCamelCase__ ) original_model.to(UpperCamelCase__ ) original_model.eval() lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase_ = {"""type""": scaling_type, """factor""": 10.0} lowercase_ = OpenLlamaModel(UpperCamelCase__ ) scaled_model.to(UpperCamelCase__ ) scaled_model.eval() lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
650
1
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCamelCase__ : def __init__( self : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : int=36 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : str=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : str=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : List[Any]=6 , UpperCamelCase__ : Tuple=6 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, 
Any]=1_000 , ): '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = num_channels lowercase_ = image_size lowercase_ = patch_size lowercase_ = text_seq_length lowercase_ = is_training lowercase_ = use_input_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = coordinate_size lowercase_ = shape_size lowercase_ = num_labels lowercase_ = num_choices lowercase_ = scope lowercase_ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowercase_ = text_seq_length lowercase_ = (image_size // patch_size) ** 2 + 1 lowercase_ = self.text_seq_length + self.image_seq_length def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) lowercase_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase_ = bbox[i, j, 3] lowercase_ = bbox[i, j, 1] lowercase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowercase_ = bbox[i, j, 2] lowercase_ = bbox[i, j, 0] lowercase_ = t lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ = None if self.use_input_mask: lowercase_ = random_attention_mask([self.batch_size, self.text_seq_length] ) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) lowercase_ = None lowercase_ = None if 
self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) lowercase_ = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] ): '''simple docstring''' lowercase_ = LayoutLMvaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # text + image lowercase_ = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ ) lowercase_ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowercase_ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowercase_ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , 
(self.batch_size, self.text_seq_length, self.hidden_size) ) # image only lowercase_ = model(pixel_values=UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ): '''simple docstring''' lowercase_ = self.num_labels lowercase_ = LayoutLMvaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int ): '''simple docstring''' lowercase_ = self.num_labels lowercase_ = LayoutLMvaForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ): '''simple docstring''' 
lowercase_ = LayoutLMvaForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Tuple = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Dict ): '''simple docstring''' return True def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = LayoutLMvaModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Any , 
UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int]=False ): '''simple docstring''' lowercase_ = copy.deepcopy(UpperCamelCase__ ) if model_class in get_values(UpperCamelCase__ ): lowercase_ = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(UpperCamelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCamelCase__ ): lowercase_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) elif model_class in get_values(UpperCamelCase__ ): lowercase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) lowercase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) elif model_class in [ *get_values(UpperCamelCase__ ), ]: lowercase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) elif model_class in [ *get_values(UpperCamelCase__ ), ]: lowercase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase__ , ) return inputs_dict def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def UpperCAmelCase__ ( 
self : int ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) @slow def UpperCAmelCase__ ( self : Any ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = LayoutLMvaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def UpperCAmelCase_ ( ): lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class UpperCamelCase__ ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None @slow def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(UpperCamelCase__ ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values.to(UpperCamelCase__ ) lowercase_ = torch.tensor([[1, 2]] ) lowercase_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass lowercase_ = model( input_ids=input_ids.to(UpperCamelCase__ ) , bbox=bbox.to(UpperCamelCase__ ) , pixel_values=pixel_values.to(UpperCamelCase__ ) , ) # verify the logits lowercase_ = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ ) lowercase_ = torch.tensor( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
650
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: a = False a = logging.get_logger(__name__) a = 'ybelkada/fonts' def UpperCAmelCase_ ( ): if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ''' """Pix2StructImageProcessor. 
Please upgrade torch.""" ) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): requires_backends(UpperCAmelCase__ , ["""torch"""] ) _check_torch_version() lowercase_ = image_tensor.unsqueeze(0 ) lowercase_ = torch.nn.functional.unfold(UpperCAmelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) lowercase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase__ , UpperCAmelCase__ , -1 ) lowercase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = 3_6 , UpperCAmelCase__ = "black" , UpperCAmelCase__ = "white" , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , ): requires_backends(UpperCAmelCase__ , """vision""" ) # Add new lines so that each line is no more than 80 characters. lowercase_ = textwrap.TextWrapper(width=8_0 ) lowercase_ = wrapper.wrap(text=UpperCAmelCase__ ) lowercase_ = """\n""".join(UpperCAmelCase__ ) if font_bytes is not None and font_path is None: lowercase_ = io.BytesIO(UpperCAmelCase__ ) elif font_path is not None: lowercase_ = font_path else: lowercase_ = hf_hub_download(UpperCAmelCase__ , """Arial.TTF""" ) lowercase_ = ImageFont.truetype(UpperCAmelCase__ , encoding="""UTF-8""" , size=UpperCAmelCase__ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. lowercase_ = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , UpperCAmelCase__ ) ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = temp_draw.textbbox((0, 0) , UpperCAmelCase__ , UpperCAmelCase__ ) # Create the actual image with a bit of padding around the text. 
lowercase_ = text_width + left_padding + right_padding lowercase_ = text_height + top_padding + bottom_padding lowercase_ = Image.new("""RGB""" , (image_width, image_height) , UpperCAmelCase__ ) lowercase_ = ImageDraw.Draw(UpperCAmelCase__ ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase__ , fill=UpperCAmelCase__ , font=UpperCAmelCase__ ) return image def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ): requires_backends(UpperCAmelCase__ , """vision""" ) # Convert to PIL image if necessary lowercase_ = to_pil_image(UpperCAmelCase__ ) lowercase_ = render_text(UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase_ = max(header_image.width , image.width ) lowercase_ = int(image.height * (new_width / image.width) ) lowercase_ = int(header_image.height * (new_width / header_image.width) ) lowercase_ = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary lowercase_ = to_numpy_array(UpperCAmelCase__ ) if infer_channel_dimension_format(UpperCAmelCase__ ) == ChannelDimension.LAST: lowercase_ = to_channel_dimension_format(UpperCAmelCase__ , ChannelDimension.LAST ) return new_image class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Tuple = ['flattened_patches'] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 2_048 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' super().__init__(**UpperCamelCase__ ) lowercase_ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} lowercase_ = do_normalize lowercase_ = do_convert_rgb lowercase_ = max_patches lowercase_ = is_vqa def UpperCAmelCase__ ( self : 
Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : dict , **UpperCamelCase__ : Optional[int] ): '''simple docstring''' requires_backends(self.extract_flattened_patches , """torch""" ) _check_torch_version() # convert to torch lowercase_ = to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.FIRST ) lowercase_ = torch.from_numpy(UpperCamelCase__ ) lowercase_ , lowercase_ = patch_size["""height"""], patch_size["""width"""] lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ ) # maximize scale s.t. lowercase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) lowercase_ = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase__ ) , 1 ) lowercase_ = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase__ ) , 1 ) lowercase_ = max(num_feasible_rows * patch_height , 1 ) lowercase_ = max(num_feasible_cols * patch_width , 1 ) lowercase_ = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=UpperCamelCase__ , antialias=UpperCamelCase__ , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] lowercase_ = torch_extract_patches(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = patches.shape lowercase_ = patches_shape[1] lowercase_ = patches_shape[2] lowercase_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] lowercase_ = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] lowercase_ = torch.arange(UpperCamelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase__ ).reshape([rows * columns, 1] ) lowercase_ = torch.arange(UpperCamelCase__ ).reshape([1, columns] ).repeat(UpperCamelCase__ , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. 
# [rows * columns, 1] lowercase_ = row_ids.to(torch.floataa ) lowercase_ = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] lowercase_ = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] lowercase_ = torch.nn.functional.pad(UpperCamelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float() lowercase_ = to_numpy_array(UpperCamelCase__ ) return result def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict ): '''simple docstring''' if image.dtype == np.uinta: lowercase_ = image.astype(np.floataa ) # take mean across the whole `image` lowercase_ = np.mean(UpperCamelCase__ ) lowercase_ = np.std(UpperCamelCase__ ) lowercase_ = max(UpperCamelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCAmelCase__ ( self : str , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ): '''simple docstring''' lowercase_ = do_normalize if do_normalize is not None else self.do_normalize lowercase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase_ = patch_size if patch_size is not None else self.patch_size lowercase_ = max_patches if max_patches is not None else self.max_patches lowercase_ = self.is_vqa if kwargs.get("""data_format""" , UpperCamelCase__ ) is not None: raise ValueError("""data_format is not an accepted input as the outputs are """ ) lowercase_ = 
make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase_ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images] if is_vqa: if header_text is None: raise ValueError("""A header text must be provided for VQA models.""" ) lowercase_ = kwargs.pop("""font_bytes""" , UpperCamelCase__ ) lowercase_ = kwargs.pop("""font_path""" , UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowercase_ = [header_text] * len(UpperCamelCase__ ) lowercase_ = [ render_header(UpperCamelCase__ , header_text[i] , font_bytes=UpperCamelCase__ , font_path=UpperCamelCase__ ) for i, image in enumerate(UpperCamelCase__ ) ] if do_normalize: lowercase_ = [self.normalize(image=UpperCamelCase__ ) for image in images] # convert to torch tensor and permute lowercase_ = [ self.extract_flattened_patches(image=UpperCamelCase__ , max_patches=UpperCamelCase__ , patch_size=UpperCamelCase__ ) for image in images ] # create attention mask in numpy lowercase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] lowercase_ = BatchFeature( data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=UpperCamelCase__ ) return encoded_outputs
650
1
def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Return all variants of the input string with exactly one alphabetic
    character upper-cased.

    Args:
        UpperCAmelCase__: the input string.

    Returns:
        A list of strings, one per alphabetic position, where that single
        character is upper-cased and the rest of the string is unchanged.
        Non-alphabetic positions are skipped; an empty or all-digit input
        yields an empty list.
    """
    # Fix: the original body referenced ``txt`` although the parameter is
    # ``UpperCAmelCase__`` — every call raised NameError.
    return [
        UpperCAmelCase__[:a] + UpperCAmelCase__[a].upper() + UpperCAmelCase__[a + 1 :]
        for a in range(len(UpperCAmelCase__))
        if UpperCAmelCase__[a].isalpha()
    ]


if __name__ == "__main__":
    __import__('doctest').testmod()
650
import cva
import numpy as np


class UpperCamelCase__ :
    """Harris corner detector over a grayscale image.

    Attributes:
        k: Harris sensitivity parameter; only the conventional values
           0.04 and 0.06 are accepted.
        window_size: side length of the square summation window.
    """

    def __init__( self , k , window_size ):
        # Fix: the mangled source gave both parameters the same name
        # (a SyntaxError) while the body referenced ``k``/``window_size``;
        # restore the names the body actually uses.
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )

    def __str__( self ):
        # Printable form is just the sensitivity parameter.
        return str(self.k )

    def UpperCAmelCase__ ( self , UpperCamelCase__ ):
        """Detect Harris corners in the grayscale image at path ``UpperCamelCase__``.

        Returns:
            (color_img, corner_list): an RGB copy of the image with detected
            corners painted red, and a list of ``[x, y, response]`` entries.
        """
        img = cva.imread(UpperCamelCase__ , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        # Structure-tensor components.
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Fix: use the configured sensitivity instead of the original
                # hard-coded local ``k = 0.04`` that silently ignored self.k.
                r = det - self.k * (trace**2)
                # Corner-response threshold; tune per image if needed.
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list


if __name__ == "__main__":
    # Fix: the original main referenced an undefined ``edge_detect``.
    edge_detect = UpperCamelCase__(0.04, 3)
    color_img , corner_list = edge_detect.UpperCAmelCase__('path_to_image')
    cva.imwrite('detect.png', color_img)
650
1
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { 'nvidia/segformer-b0-finetuned-ade-512-512': ( 'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = 'segformer' def __init__( self : int , UpperCamelCase__ : Any=3 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Optional[Any]=[2, 2, 2, 2] , UpperCamelCase__ : Optional[Any]=[8, 4, 2, 1] , UpperCamelCase__ : Any=[32, 64, 160, 256] , UpperCamelCase__ : Optional[Any]=[7, 3, 3, 3] , UpperCamelCase__ : Optional[int]=[4, 2, 2, 2] , UpperCamelCase__ : Optional[int]=[1, 2, 5, 8] , UpperCamelCase__ : Any=[4, 4, 4, 4] , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Any=1e-6 , UpperCamelCase__ : Tuple=256 , UpperCamelCase__ : Optional[Any]=255 , **UpperCamelCase__ : int , ): '''simple docstring''' super().__init__(**UpperCamelCase__ ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. 
This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase__ , ) lowercase_ = num_channels lowercase_ = num_encoder_blocks lowercase_ = depths lowercase_ = sr_ratios lowercase_ = hidden_sizes lowercase_ = patch_sizes lowercase_ = strides lowercase_ = mlp_ratios lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = classifier_dropout_prob lowercase_ = initializer_range lowercase_ = drop_path_rate lowercase_ = layer_norm_eps lowercase_ = decoder_hidden_size lowercase_ = kwargs.get("""reshape_last_stage""" , UpperCamelCase__ ) lowercase_ = semantic_loss_ignore_index class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : List[Any] = version.parse('1.11' ) @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' return 1e-4 @property def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' return 12
650
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


# Map resampling-mode names to PIL constants; Pillow >= 9.1.0 moved them into
# the ``Image.Resampling`` enum and deprecated the module-level aliases.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    a = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    a = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Convert a batch of torch images in [-1, 1] (NCHW) to PIL images.

    Fix: the original body referenced undefined names (``images`` and
    ``numpy_to_pil``) left over from renaming, so every call raised NameError.
    """
    # Rescale from [-1, 1] to [0, 1], move to CPU and channels-last layout.
    lowercase_ = (UpperCAmelCase__ / 2 + 0.5).clamp(0 , 1 )
    lowercase_ = lowercase_.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    # The following ``def`` rebinds this module-level name, so at call time
    # this recursive-looking call resolves to the numpy -> PIL converter below.
    lowercase_ = UpperCAmelCase_(lowercase_ )
    return lowercase_


def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Convert a numpy image batch (NHWC, floats in [0, 1]) to a list of PIL images."""
    if UpperCAmelCase__.ndim == 3:
        # Promote a single image to a batch of one.
        UpperCAmelCase__ = UpperCAmelCase__[None, ...]
    lowercase_ = (UpperCAmelCase__ * 2_5_5).round().astype("""uint8""" )
    if lowercase_.shape[-1] == 1:
        # special case for grayscale (single channel) images
        lowercase_ = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in lowercase_]
    else:
        lowercase_ = [Image.fromarray(image ) for image in lowercase_]
    return lowercase_
650
1
def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Return True if the list forms an arithmetic series (constant difference).

    Args:
        UpperCAmelCase__: a non-empty list of numbers.

    Raises:
        ValueError: if the input is not a list or is empty.
    """
    # Fix: the original wrote ``isinstance(series, series)`` — comparing the
    # value against itself as a type, which raised TypeError for any list.
    if not isinstance(UpperCAmelCase__ , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(UpperCAmelCase__ ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    if len(UpperCAmelCase__ ) == 1:
        # A single element is trivially arithmetic.
        return True
    common_diff = UpperCAmelCase__[1] - UpperCAmelCase__[0]
    for index in range(len(UpperCAmelCase__ ) - 1 ):
        if UpperCAmelCase__[index + 1] - UpperCAmelCase__[index] != common_diff:
            return False
    return True


def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if the input is not a list or is empty.

    Note: this definition shadows the previous one at module level, mirroring
    the original file's (mangled) duplicate naming.
    """
    # Same isinstance fix as above.
    if not isinstance(UpperCAmelCase__ , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(UpperCAmelCase__ ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    answer = 0
    for val in UpperCAmelCase__:
        answer += val
    return answer / len(UpperCAmelCase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : str = (UnCLIPScheduler,) def UpperCAmelCase__ ( self : int , **UpperCamelCase__ : int ): '''simple docstring''' lowercase_ = { """num_train_timesteps""": 1_000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**UpperCamelCase__ ) return config def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config(variance_type="""fixed_small_log""" ) lowercase_ = scheduler_class(**UpperCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert 
torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config(variance_type="""learned_range""" ) lowercase_ = scheduler_class(**UpperCamelCase__ ) lowercase_ = 0.5 assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1e-5 def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**UpperCamelCase__ ) lowercase_ = scheduler.timesteps lowercase_ = self.dummy_model() lowercase_ = self.dummy_sample_deter lowercase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ ) # 2. predict previous mean of sample x_t-1 lowercase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample lowercase_ = pred_prev_sample lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(25 ) lowercase_ = scheduler.timesteps lowercase_ = self.dummy_model() lowercase_ = self.dummy_sample_deter lowercase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. 
predict noise residual lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ ) if i + 1 == timesteps.shape[0]: lowercase_ = None else: lowercase_ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowercase_ = scheduler.step( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample lowercase_ = pred_prev_sample lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' pass def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass
650
1
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] a = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] a = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) a = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) a = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): for tf_name, hf_name in patterns: lowercase_ = k.replace(UpperCAmelCase__ , UpperCAmelCase__ ) return k def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = BigBirdPegasusConfig(**UpperCAmelCase__ ) lowercase_ = BigBirdPegasusForConditionalGeneration(UpperCAmelCase__ ) lowercase_ = torch_model.state_dict() lowercase_ = {} # separating decoder weights lowercase_ = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} lowercase_ = {k: tf_weights[k] for k in tf_weights if not 
k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): lowercase_ = [k.endswith(UpperCAmelCase__ ) for ending in KEYS_TO_IGNORE] if any(UpperCAmelCase__ ): continue lowercase_ = DECODER_PATTERNS lowercase_ = rename_state_dict_key(UpperCAmelCase__ , UpperCAmelCase__ ) if new_k not in state_dict: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowercase_ = v.T lowercase_ = torch.from_numpy(UpperCAmelCase__ ) assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): lowercase_ = [k.endswith(UpperCAmelCase__ ) for ending in KEYS_TO_IGNORE] if any(UpperCAmelCase__ ): continue lowercase_ = REMAINING_PATTERNS lowercase_ = rename_state_dict_key(UpperCAmelCase__ , UpperCAmelCase__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowercase_ = v.T lowercase_ = torch.from_numpy(UpperCAmelCase__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' lowercase_ = mapping["""model.embed_positions.weight"""] lowercase_ = mapping.pop("""model.embed_positions.weight""" ) lowercase_ , lowercase_ = torch_model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ ) lowercase_ = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = tf.train.list_variables(UpperCAmelCase__ ) lowercase_ = {} lowercase_ = ["""global_step"""] for name, shape in tqdm(UpperCAmelCase__ , desc="""converting tf checkpoint to dict""" ): lowercase_ = any(pat in name for pat in ignore_name ) if skip_key: continue lowercase_ = tf.train.load_variable(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ = array return tf_weights def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = get_tf_weights_as_numpy(UpperCAmelCase__ ) lowercase_ = convert_bigbird_pegasus(UpperCAmelCase__ , UpperCAmelCase__ ) torch_model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') a = parser.parse_args() a = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, 
config_update=config_update)
650
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCamelCase__ : __SCREAMING_SNAKE_CASE : Optional[str] = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the training data.'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the validation data.'} ) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} ) __SCREAMING_SNAKE_CASE : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} ) __SCREAMING_SNAKE_CASE : float = field( default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = {} if self.train_dir is not None: lowercase_ = self.train_dir if self.validation_dir is not None: lowercase_ = self.validation_dir lowercase_ = data_files if data_files else None @dataclass class UpperCamelCase__ : __SCREAMING_SNAKE_CASE : str = field( default=__magic_name__ , metadata={ 'help': ( 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ' 'checkpoint identifier on the hub. ' 'Don\'t set if you want to train a model from scratch.' 
) } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__magic_name__ )} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , ) __SCREAMING_SNAKE_CASE : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) __SCREAMING_SNAKE_CASE : str = field(default=__magic_name__ , metadata={'help': 'Name or path of preprocessor config.'} ) __SCREAMING_SNAKE_CASE : bool = field( default=__magic_name__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={ 'help': ( 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={ 'help': ( 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.' 
) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={'help': 'Stride to use for the encoder.'} , ) class UpperCamelCase__ : def __init__( self : Dict , UpperCamelCase__ : List[Any]=192 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : str=0.6 ): '''simple docstring''' lowercase_ = input_size lowercase_ = mask_patch_size lowercase_ = model_patch_size lowercase_ = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("""Input size must be divisible by mask patch size""" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("""Mask patch size must be divisible by model patch size""" ) lowercase_ = self.input_size // self.mask_patch_size lowercase_ = self.mask_patch_size // self.model_patch_size lowercase_ = self.rand_size**2 lowercase_ = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : int ): '''simple docstring''' lowercase_ = np.random.permutation(self.token_count )[: self.mask_count] lowercase_ = np.zeros(self.token_count , dtype=UpperCamelCase__ ) lowercase_ = 1 lowercase_ = mask.reshape((self.rand_size, self.rand_size) ) lowercase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = torch.stack([example["""pixel_values"""] for example in examples] ) lowercase_ = torch.stack([example["""mask"""] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def UpperCAmelCase_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase_ = training_args.get_process_log_level() logger.setLevel(UpperCAmelCase__ ) transformers.utils.logging.set_verbosity(UpperCAmelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
lowercase_ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase_ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. lowercase_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0: lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split ) lowercase_ = split["""train"""] lowercase_ = split["""test"""] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase_ = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: lowercase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCAmelCase__ ) elif model_args.model_name_or_path: lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ ) else: lowercase_ = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(UpperCAmelCase__ , """decoder_type""" ): lowercase_ = """simmim""" # adapt config lowercase_ = model_args.image_size if model_args.image_size is not None else config.image_size lowercase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size lowercase_ = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { """image_size""": model_args.image_size, """patch_size""": model_args.patch_size, """encoder_stride""": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: lowercase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ ) elif model_args.model_name_or_path: lowercase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ ) else: lowercase_ = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } lowercase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: lowercase_ = AutoModelForMaskedImageModeling.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) lowercase_ = AutoModelForMaskedImageModeling.from_config(UpperCAmelCase__ ) if training_args.do_train: lowercase_ = ds["""train"""].column_names else: lowercase_ = ds["""validation"""].column_names if data_args.image_column_name is not None: lowercase_ = data_args.image_column_name elif "image" in column_names: lowercase_ = """image""" elif "img" in column_names: lowercase_ = """img""" else: lowercase_ = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py lowercase_ = Compose( [ Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator lowercase_ = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(UpperCAmelCase__ ): lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]] lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: lowercase_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(UpperCAmelCase__ ) if training_args.do_eval: if 
"validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: lowercase_ = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(UpperCAmelCase__ ) # Initialize our trainer lowercase_ = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: lowercase_ = None if training_args.resume_from_checkpoint is not None: lowercase_ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase_ = last_checkpoint lowercase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase_ = trainer.evaluate() trainer.log_metrics("""eval""" , UpperCAmelCase__ ) trainer.save_metrics("""eval""" , UpperCAmelCase__ ) # Write model card and (optionally) push to hub lowercase_ = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """masked-image-modeling""", """dataset""": data_args.dataset_name, """tags""": ["""masked-image-modeling"""], } if training_args.push_to_hub: trainer.push_to_hub(**UpperCAmelCase__ ) else: trainer.create_model_card(**UpperCAmelCase__ ) if __name__ == "__main__": main()
650
1
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : str = (UnCLIPScheduler,) def UpperCAmelCase__ ( self : int , **UpperCamelCase__ : int ): '''simple docstring''' lowercase_ = { """num_train_timesteps""": 1_000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**UpperCamelCase__ ) return config def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config(variance_type="""fixed_small_log""" ) lowercase_ = scheduler_class(**UpperCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert 
torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config(variance_type="""learned_range""" ) lowercase_ = scheduler_class(**UpperCamelCase__ ) lowercase_ = 0.5 assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1e-5 def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**UpperCamelCase__ ) lowercase_ = scheduler.timesteps lowercase_ = self.dummy_model() lowercase_ = self.dummy_sample_deter lowercase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ ) # 2. predict previous mean of sample x_t-1 lowercase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample lowercase_ = pred_prev_sample lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(25 ) lowercase_ = scheduler.timesteps lowercase_ = self.dummy_model() lowercase_ = self.dummy_sample_deter lowercase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. 
predict noise residual lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ ) if i + 1 == timesteps.shape[0]: lowercase_ = None else: lowercase_ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowercase_ = scheduler.step( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample lowercase_ = pred_prev_sample lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' pass def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass
650
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


a = logging.get_logger(__name__)


# NOTE(review): this module is machine-obfuscated — every assignment target was
# rewritten to `lowercase_` while later reads still use the original names
# (size_dict, images, ...), and the base class was replaced by the undefined
# placeholder `__magic_name__` (presumably BaseImageProcessor, imported above —
# confirm against the upstream file). Comments below document intent only.
class UpperCamelCase__(__magic_name__):
    """Image processor: resize -> center-crop -> rescale -> normalize pipeline
    producing `pixel_values` batches (ImageNet default mean/std)."""

    # presumably model_input_names — verify against BaseImageProcessor
    __SCREAMING_SNAKE_CASE: List[Any] = ["pixel_values"]

    def __init__(
        self: List[str],
        UpperCamelCase__: bool = True,
        UpperCamelCase__: Dict[str, int] = None,
        UpperCamelCase__: PILImageResampling = PILImageResampling.BICUBIC,
        UpperCamelCase__: bool = True,
        UpperCamelCase__: Dict[str, int] = None,
        UpperCamelCase__: bool = True,
        UpperCamelCase__: Union[int, float] = 1 / 255,
        UpperCamelCase__: bool = True,
        UpperCamelCase__: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        UpperCamelCase__: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **UpperCamelCase__: Dict,
    ):
        """Store the default preprocessing flags and parameters.

        Defaults: shortest-edge-224 resize, 224x224 center crop, 1/255 rescale,
        ImageNet mean/std normalization.
        """
        super().__init__(**UpperCamelCase__)
        # default size is a shortest-edge spec, not a fixed height/width
        lowercase_ = size if size is not None else {"""shortest_edge""": 224}
        lowercase_ = get_size_dict(UpperCamelCase__, default_to_square=UpperCamelCase__)
        lowercase_ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        lowercase_ = get_size_dict(UpperCamelCase__, param_name="""crop_size""")
        lowercase_ = do_resize
        lowercase_ = size
        lowercase_ = resample
        lowercase_ = do_center_crop
        lowercase_ = crop_size
        lowercase_ = do_rescale
        lowercase_ = rescale_factor
        lowercase_ = do_normalize
        lowercase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        lowercase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def UpperCAmelCase__(
        self: int,
        UpperCamelCase__: np.ndarray,
        UpperCamelCase__: Dict[str, int],
        UpperCamelCase__: PILImageResampling = PILImageResampling.BICUBIC,
        UpperCamelCase__: Optional[Union[str, ChannelDimension]] = None,
        **UpperCamelCase__: Optional[int],
    ):
        """Resize an image.

        A "shortest_edge" size is scaled by 256/224 (the classic eval-time
        resize-then-crop ratio) before computing the output size; otherwise the
        size dict must carry explicit "height"/"width" keys.
        """
        lowercase_ = get_size_dict(UpperCamelCase__, default_to_square=UpperCamelCase__)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            # enlarge shortest edge so the later 224 crop matches ImageNet eval
            lowercase_ = int((256 / 224) * size["""shortest_edge"""])
            lowercase_ = get_resize_output_image_size(UpperCamelCase__, size=UpperCamelCase__, default_to_square=UpperCamelCase__)
            lowercase_ = {"""height""": output_size[0], """width""": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''')
        return resize(
            UpperCamelCase__, size=(size_dict["""height"""], size_dict["""width"""]), resample=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__)

    def UpperCAmelCase__(
        self: str,
        UpperCamelCase__: np.ndarray,
        UpperCamelCase__: Dict[str, int],
        UpperCamelCase__: Optional[Union[str, ChannelDimension]] = None,
        **UpperCamelCase__: str,
    ):
        """Center-crop an image to size["height"] x size["width"]."""
        lowercase_ = get_size_dict(UpperCamelCase__)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''')
        return center_crop(UpperCamelCase__, size=(size["""height"""], size["""width"""]), data_format=UpperCamelCase__, **UpperCamelCase__)

    def UpperCAmelCase__(
        self: Optional[int],
        UpperCamelCase__: np.ndarray,
        UpperCamelCase__: Union[int, float],
        UpperCamelCase__: Optional[Union[str, ChannelDimension]] = None,
        **UpperCamelCase__: str,
    ):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(UpperCamelCase__, scale=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__)

    def UpperCAmelCase__(
        self: List[str],
        UpperCamelCase__: np.ndarray,
        UpperCamelCase__: Union[float, List[float]],
        UpperCamelCase__: Union[float, List[float]],
        UpperCamelCase__: Optional[Union[str, ChannelDimension]] = None,
        **UpperCamelCase__: Optional[int],
    ):
        """Normalize an image with the given per-channel mean and std."""
        return normalize(UpperCamelCase__, mean=UpperCamelCase__, std=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__)

    def UpperCAmelCase__(
        self: Dict,
        UpperCamelCase__: ImageInput,
        UpperCamelCase__: Optional[bool] = None,
        UpperCamelCase__: Optional[Dict[str, int]] = None,
        UpperCamelCase__: PILImageResampling = None,
        UpperCamelCase__: Optional[bool] = None,
        UpperCamelCase__: Optional[Dict[str, int]] = None,
        UpperCamelCase__: Optional[bool] = None,
        UpperCamelCase__: Optional[float] = None,
        UpperCamelCase__: Optional[bool] = None,
        UpperCamelCase__: Optional[Union[float, Iterable[float]]] = None,
        UpperCamelCase__: Optional[Union[float, Iterable[float]]] = None,
        UpperCamelCase__: Optional[TensorType] = None,
        UpperCamelCase__: ChannelDimension = ChannelDimension.FIRST,
        **UpperCamelCase__: Optional[int],
    ):
        """Preprocess one image or a batch.

        Each flag/parameter falls back to the instance default when None;
        returns a BatchFeature with key "pixel_values" in the requested
        channel-dimension format.
        """
        # per-call overrides, falling back to the instance defaults
        lowercase_ = do_resize if do_resize is not None else self.do_resize
        lowercase_ = resample if resample is not None else self.resample
        lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
        lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
        lowercase_ = image_mean if image_mean is not None else self.image_mean
        lowercase_ = image_std if image_std is not None else self.image_std
        lowercase_ = size if size is not None else self.size
        lowercase_ = get_size_dict(UpperCamelCase__, default_to_square=UpperCamelCase__)
        lowercase_ = crop_size if crop_size is not None else self.crop_size
        lowercase_ = get_size_dict(UpperCamelCase__, param_name="""crop_size""")
        lowercase_ = make_list_of_images(UpperCamelCase__)
        if not valid_images(UpperCamelCase__):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        # validate that every enabled step has its parameters
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        # All transformations expect numpy arrays.
        lowercase_ = [to_numpy_array(UpperCamelCase__) for image in images]
        if do_resize:
            lowercase_ = [self.resize(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__) for image in images]
        if do_center_crop:
            lowercase_ = [self.center_crop(UpperCamelCase__, UpperCamelCase__) for image in images]
        if do_rescale:
            lowercase_ = [self.rescale(UpperCamelCase__, UpperCamelCase__) for image in images]
        if do_normalize:
            lowercase_ = [self.normalize(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__) for image in images]
        lowercase_ = [to_channel_dimension_format(UpperCamelCase__, UpperCamelCase__) for image in images]
        lowercase_ = {"""pixel_values""": images}
        return BatchFeature(data=UpperCamelCase__, tensor_type=UpperCamelCase__)
650
1
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# NOTE(review): machine-obfuscated script — every assignment target was renamed
# to `lowercase_` and every function to `UpperCAmelCase_`, while later reads use
# the original names (model, accelerator, ...). Comments describe intent only.
a = 'true'


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__=8_2, UpperCAmelCase__=1_6):
    """Build a seeded regression model + a deepcopy prepared by `accelerator`,
    plus the (prepared) regression dataloader. Returns (model, ddp_model, dataloader)."""
    set_seed(4_2)
    lowercase_ = RegressionModel()
    lowercase_ = deepcopy(UpperCAmelCase__)
    lowercase_ = RegressionDataset(length=UpperCAmelCase__)
    lowercase_ = DataLoader(UpperCAmelCase__, batch_size=UpperCAmelCase__)
    model.to(accelerator.device)
    lowercase_, lowercase_ = accelerator.prepare(UpperCAmelCase__, UpperCAmelCase__)
    return model, ddp_model, dataloader


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__=False):
    """Tokenize GLUE/MRPC validation split and return a DataLoader.

    `use_longest` switches the collator between "longest" and fixed
    max_length=128 padding.
    """
    lowercase_ = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""")
    lowercase_ = load_dataset("""glue""", """mrpc""", split="""validation""")

    def tokenize_function(UpperCAmelCase__):
        # tokenize sentence pairs; truncation/max_length flags come from scope
        lowercase_ = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=UpperCAmelCase__, max_length=UpperCAmelCase__)
        return outputs

    # main_process_first: only rank 0 performs the map, others reuse the cache
    with accelerator.main_process_first():
        lowercase_ = dataset.map(
            UpperCAmelCase__, batched=UpperCAmelCase__, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
    lowercase_ = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(UpperCAmelCase__):
        if use_longest:
            return tokenizer.pad(UpperCAmelCase__, padding="""longest""", return_tensors="""pt""")
        return tokenizer.pad(UpperCAmelCase__, padding="""max_length""", max_length=1_2_8, return_tensors="""pt""")

    return DataLoader(UpperCAmelCase__, shuffle=UpperCAmelCase__, collate_fn=UpperCAmelCase__, batch_size=1_6)


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__):
    """Build the MRPC comparison setup: a distributed ("ddp") and a plain ("no")
    model/dataloader pair sharing the same pretrained checkpoint."""
    lowercase_ = Accelerator(dispatch_batches=UpperCAmelCase__, split_batches=UpperCAmelCase__)
    lowercase_ = get_dataloader(UpperCAmelCase__, not dispatch_batches)
    lowercase_ = AutoModelForSequenceClassification.from_pretrained(
        """hf-internal-testing/mrpc-bert-base-cased""", return_dict=UpperCAmelCase__)
    lowercase_, lowercase_ = accelerator.prepare(UpperCAmelCase__, UpperCAmelCase__)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__):
    """Run the model over the dataloader, gathering (logits, targets) across
    processes with gather_for_metrics; returns concatenated tensors."""
    lowercase_ = []
    for batch in dataloader:
        lowercase_, lowercase_ = batch.values()
        with torch.no_grad():
            lowercase_ = model(UpperCAmelCase__)
        # gather_for_metrics drops duplicated samples from the last batch
        lowercase_, lowercase_ = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    lowercase_, lowercase_ = [], []
    for logit, targ in logits_and_targets:
        logits.append(UpperCAmelCase__)
        targs.append(UpperCAmelCase__)
    lowercase_, lowercase_ = torch.cat(UpperCAmelCase__), torch.cat(UpperCAmelCase__)
    return logits, targs


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__=8_2, UpperCAmelCase__=False, UpperCAmelCase__=False, UpperCAmelCase__=1_6):
    """Assert that gather_for_metrics yields exactly `num_samples` predictions
    (i.e. last-batch duplicates are dropped)."""
    lowercase_, lowercase_, lowercase_ = get_basic_setup(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__)
    lowercase_, lowercase_ = generate_predictions(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__)
    assert (
        len(UpperCAmelCase__) == num_samples
    ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(UpperCAmelCase__ )}'''


def UpperCAmelCase_(UpperCAmelCase__=False, UpperCAmelCase__=False):
    """Check that MRPC metrics computed distributed (via gather_for_metrics)
    match a single-process baseline for accuracy and F1."""
    lowercase_ = evaluate.load("""glue""", """mrpc""")
    lowercase_, lowercase_ = get_mrpc_setup(UpperCAmelCase__, UpperCAmelCase__)
    # First do baseline
    lowercase_, lowercase_, lowercase_ = setup["""no"""]
    model.to(UpperCAmelCase__)
    model.eval()
    for batch in dataloader:
        batch.to(UpperCAmelCase__)
        with torch.inference_mode():
            lowercase_ = model(**UpperCAmelCase__)
        lowercase_ = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=UpperCAmelCase__, references=batch["""labels"""])
    lowercase_ = metric.compute()
    # Then do distributed
    lowercase_, lowercase_, lowercase_ = setup["""ddp"""]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            lowercase_ = model(**UpperCAmelCase__)
        lowercase_ = outputs.logits.argmax(dim=-1)
        lowercase_ = batch["""labels"""]
        lowercase_, lowercase_ = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=UpperCAmelCase__, references=UpperCAmelCase__)
    lowercase_ = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''


def UpperCAmelCase_():
    """Entry point: run the metric-gathering tests over all combinations of
    split_batches / dispatch_batches, resetting accelerator state in between."""
    lowercase_ = Accelerator(split_batches=UpperCAmelCase__, dispatch_batches=UpperCAmelCase__)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("""**Testing gather_for_metrics**""")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(UpperCAmelCase__, UpperCAmelCase__)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test torch metrics**""")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            lowercase_ = Accelerator(split_batches=UpperCAmelCase__, dispatch_batches=UpperCAmelCase__)
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(UpperCAmelCase__, 9_9)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test last batch is not dropped when perfectly divisible**""")
    lowercase_ = Accelerator()
    test_torch_metrics(UpperCAmelCase__, 5_1_2)
    accelerator.state._reset_state()


def UpperCAmelCase_(UpperCAmelCase__):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
650
# Lazy-import package __init__ for the TrOCR model (config, processor, and the
# torch modeling classes when torch is available).
# NOTE(review): obfuscation artifact — the module-level targets were all renamed
# to `a`, so the later `_import_structure` read refers to a name never assigned
# here; in the upstream file these are `_import_structure` dict updates.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


a = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}

# modeling classes are only importable when torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]

if TYPE_CHECKING:
    # real imports for static type checkers
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # at runtime, replace this module with a lazy loader
    a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
650
1
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class UpperCamelCase__(ctypes.Structure):
        """ctypes mirror of the Win32 CONSOLE_CURSOR_INFO structure."""

        # ``_fields_`` is the specific attr expected by ctypes (the original
        # bound the field list to a renamed attribute, so the struct was empty)
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    """Hide the terminal cursor.

    Windows: toggle ``visible`` via the kernel32 console API (the original
    referenced the nonexistent ``ctypes.windll.kernelaa``). POSIX: emit the
    ANSI "hide cursor" escape sequence and flush so it takes effect at once.
    """
    if os.name == "nt":
        cursor_info = UpperCamelCase__()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    """Show the terminal cursor again (inverse of :func:`hide_cursor`)."""
    if os.name == "nt":
        cursor_info = UpperCamelCase__()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def UpperCAmelCase_():
    """Context manager that hides the cursor and always restores it on exit.

    In the original file all three functions shared the name ``UpperCAmelCase_``
    so this context manager (the surviving binding) called undefined names;
    the helpers are now reachable as ``hide_cursor`` / ``show_cursor`` while
    this public name is preserved.
    """
    try:
        hide_cursor()
        yield
    finally:
        # restore even if the body raised
        show_cursor()
650
# Conversion script: fairseq wav2vec2 encoder + Speech2Text2 decoder checkpoint
# -> HuggingFace SpeechEncoderDecoderModel.
# NOTE(review): machine-obfuscated — assignment targets were renamed to
# `lowercase_`/`a` while later reads keep the original names (logger, MAPPING,
# parser, ...). Comments below document intent only; the code is left byte-identical.
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
a = logging.get_logger(__name__)

# fairseq parameter name -> HF parameter name ("*" is a wildcard layer index)
a = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# keys that live at the top level of the HF model rather than inside wav2vec2
a = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__):
    """Walk `key` through the HF model and copy `value` into the matching
    weight/bias tensor, asserting the shapes agree first."""
    for attribute in key.split("""."""):
        lowercase_ = getattr(UpperCAmelCase__, UpperCAmelCase__)
    if weight_type is not None:
        lowercase_ = getattr(UpperCAmelCase__, UpperCAmelCase__).shape
    else:
        lowercase_ = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    # route the copy to .weight / .weight_g / .weight_v / .bias / raw data
    if weight_type == "weight":
        lowercase_ = value
    elif weight_type == "weight_g":
        lowercase_ = value
    elif weight_type == "weight_v":
        lowercase_ = value
    elif weight_type == "bias":
        lowercase_ = value
    else:
        lowercase_ = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__):
    """Copy every fairseq encoder weight into the HF wav2vec2 model, returning
    the encoder->decoder projection weight (handled separately by the caller)."""
    lowercase_ = []
    lowercase_ = fairseq_model.state_dict()
    lowercase_ = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    lowercase_ = None
    for name, value in fairseq_dict.items():
        lowercase_ = False
        if "conv_layers" in name:
            load_conv_layer(
                UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, hf_model.config.feat_extract_norm == """group""", )
            lowercase_ = True
        elif name.split(""".""")[0] == "proj":
            # the projection layer is attached to the seq2seq model, not wav2vec2
            lowercase_ = fairseq_model.proj
            lowercase_ = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    lowercase_ = True
                    if "*" in mapped_key:
                        # fill in the layer index for per-layer parameters
                        lowercase_ = name.split(UpperCAmelCase__)[0].split(""".""")[-2]
                        lowercase_ = mapped_key.replace("""*""", UpperCAmelCase__)
                    if "weight_g" in name:
                        lowercase_ = """weight_g"""
                    elif "weight_v" in name:
                        lowercase_ = """weight_v"""
                    elif "bias" in name:
                        lowercase_ = """bias"""
                    elif "weight" in name:
                        lowercase_ = """weight"""
                    else:
                        lowercase_ = None
                    set_recursively(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__)
                continue
        if not is_used:
            unused_weights.append(UpperCAmelCase__)
    logger.warning(F'''Unused weights: {unused_weights}''')
    return proj_weight


def UpperCAmelCase_(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__):
    """Copy one feature-extractor conv-layer weight/bias (type_id 0 = conv,
    type_id 2 = layer/group norm) into the HF feature extractor."""
    lowercase_ = full_name.split("""conv_layers.""")[-1]
    lowercase_ = name.split(""".""")
    lowercase_ = int(items[0])
    lowercase_ = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            lowercase_ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            lowercase_ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            lowercase_ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            lowercase_ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(UpperCAmelCase__)


def UpperCAmelCase_(UpperCAmelCase__):
    """Turn an embedding matrix into an equivalent bias-free Linear layer."""
    lowercase_, lowercase_ = emb.weight.shape
    lowercase_ = nn.Linear(UpperCAmelCase__, UpperCAmelCase__, bias=UpperCAmelCase__)
    lowercase_ = emb.weight.data
    return lin_layer


def UpperCAmelCase_(UpperCAmelCase__):
    """Read a fairseq dict file (one "token count" pair per line) and build the
    HF vocab dict, reserving ids 0-3 for the special tokens."""
    with open(UpperCAmelCase__, """r""", encoding="""utf-8""") as f:
        lowercase_ = f.readlines()
        lowercase_ = [line.split(""" """)[0] for line in lines]
    lowercase_ = len(UpperCAmelCase__)
    lowercase_ = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(UpperCAmelCase__, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def UpperCAmelCase_(
    UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ):
    """Convert the fairseq checkpoint and save model, tokenizer and feature
    extractor to `pytorch_dump_folder_path`."""
    lowercase_ = WavaVecaConfig.from_pretrained(UpperCAmelCase__)
    lowercase_ = SpeechaTextaConfig.from_pretrained(
        UpperCAmelCase__, vocab_size=UpperCAmelCase__, decoder_layers=UpperCAmelCase__, do_stable_layer_norm=UpperCAmelCase__)
    lowercase_ = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0, do_normalize=UpperCAmelCase__, return_attention_mask=UpperCAmelCase__, )
    lowercase_, lowercase_, lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])})
    lowercase_ = model[0].eval()
    # set weights for wav2vec2 encoder
    lowercase_ = WavaVecaModel(UpperCAmelCase__)
    lowercase_ = recursively_load_weights_wavaveca(model.encoder, UpperCAmelCase__)
    lowercase_ = SpeechaTextaForCausalLM(UpperCAmelCase__)
    lowercase_, lowercase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=UpperCAmelCase__)
    # set output linear layer
    unexpected_keys.remove("""embed_out""")
    lowercase_ = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
    lowercase_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__, decoder=UpperCAmelCase__)
    lowercase_ = False
    # add projection layer
    lowercase_ = nn.Parameter(projection_layer.weight)
    lowercase_ = nn.Parameter(projection_layer.bias)
    lowercase_ = create_vocab_dict(UpperCAmelCase__)
    with open(os.path.join(UpperCAmelCase__, """vocab.json"""), """w""") as fp:
        json.dump(UpperCAmelCase__, UpperCAmelCase__)
    lowercase_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase__, """vocab.json"""))
    tokenizer.save_pretrained(UpperCAmelCase__)
    lowercase_ = hf_wavavec.config.to_dict()
    lowercase_ = tokenizer.pad_token_id
    lowercase_ = tokenizer.bos_token_id
    lowercase_ = tokenizer.eos_token_id
    lowercase_ = """speech_to_text_2"""
    lowercase_ = """wav2vec2"""
    lowercase_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__)
    hf_wavavec.save_pretrained(UpperCAmelCase__)
    feature_extractor.save_pretrained(UpperCAmelCase__)


if __name__ == "__main__":
    a = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument(
        '--encoder_config_path',
        default='facebook/wav2vec2-large-lv60',
        type=str,
        help='Path to hf encoder wav2vec2 checkpoint config',
    )
    parser.add_argument(
        '--decoder_config_path',
        default='facebook/s2t-small-mustc-en-fr-st',
        type=str,
        help='Path to hf decoder s2t checkpoint config',
    )
    parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
    parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    a = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
650
1
# Lazy-import package __init__ for the generic EncoderDecoder model, exposing
# the PyTorch / TensorFlow / Flax implementations only when each backend is
# installed.
# NOTE(review): obfuscation artifact — all module-level targets were renamed to
# `a`, so the `_import_structure` read at the bottom refers to a name never
# assigned here; upstream these lines extend an `_import_structure` dict.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


a = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = ['EncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = ['TFEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = ['FlaxEncoderDecoderModel']

if TYPE_CHECKING:
    # real imports for static type checkers
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # at runtime, replace this module with a lazy loader
    a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
650
# ESM model configuration (sequence model + optional ESMFold folding head).
# NOTE(review): machine-obfuscated — assignment targets were renamed to
# `lowercase_`/`a`, every dataclass field to `__SCREAMING_SNAKE_CASE`, and the
# base class to the undefined `__magic_name__` (presumably PretrainedConfig,
# imported above — confirm upstream). Code is left byte-identical; comments
# document intent only.
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a = logging.get_logger(__name__)

# TODO Update this
a = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class UpperCamelCase__(__magic_name__):
    """Configuration for ESM; when `is_folding_model` is set, also carries a
    nested EsmFoldConfig and vocab list for the structure-prediction head."""

    __SCREAMING_SNAKE_CASE: Tuple = 'esm'

    def __init__(
        self: Optional[Any],
        UpperCamelCase__: Dict = None,
        UpperCamelCase__: Tuple = None,
        UpperCamelCase__: List[Any] = None,
        UpperCamelCase__: List[Any] = 768,
        UpperCamelCase__: Tuple = 12,
        UpperCamelCase__: Optional[int] = 12,
        UpperCamelCase__: Dict = 3_072,
        UpperCamelCase__: Union[str, Any] = 0.1,
        UpperCamelCase__: Dict = 0.1,
        UpperCamelCase__: Optional[int] = 1_026,
        UpperCamelCase__: Any = 0.02,
        UpperCamelCase__: Dict = 1e-12,
        UpperCamelCase__: List[str] = "absolute",
        UpperCamelCase__: Union[str, Any] = True,
        UpperCamelCase__: Tuple = None,
        UpperCamelCase__: Union[str, Any] = False,
        UpperCamelCase__: Optional[int] = False,
        UpperCamelCase__: Dict = None,
        UpperCamelCase__: str = None,
        **UpperCamelCase__: str,
    ):
        """Store transformer hyper-parameters; build/validate the nested
        EsmFoldConfig and vocab list when this is a folding model."""
        super().__init__(pad_token_id=UpperCamelCase__, mask_token_id=UpperCamelCase__, **UpperCamelCase__)
        lowercase_ = vocab_size
        lowercase_ = hidden_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = intermediate_size
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = max_position_embeddings
        lowercase_ = initializer_range
        lowercase_ = layer_norm_eps
        lowercase_ = position_embedding_type
        lowercase_ = use_cache
        lowercase_ = emb_layer_norm_before
        lowercase_ = token_dropout
        lowercase_ = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""")
                lowercase_ = EsmFoldConfig()
            elif isinstance(UpperCamelCase__, UpperCamelCase__):
                # allow passing the nested config as a plain dict
                lowercase_ = EsmFoldConfig(**UpperCamelCase__)
            lowercase_ = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""")
                lowercase_ = get_default_vocab_list()
            else:
                lowercase_ = vocab_list
        else:
            lowercase_ = None
            lowercase_ = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, """use_esm_attn_map""", UpperCamelCase__):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""")

    def UpperCAmelCase__(self: List[Any]):
        """Serialize to a dict, expanding the nested EsmFoldConfig."""
        lowercase_ = super().to_dict()
        if isinstance(self.esmfold_config, UpperCamelCase__):
            lowercase_ = self.esmfold_config.to_dict()
        return output


@dataclass
class UpperCamelCase__:
    """ESMFold head configuration; `trunk` nests a TrunkConfig."""

    __SCREAMING_SNAKE_CASE: str = None
    __SCREAMING_SNAKE_CASE: bool = True
    __SCREAMING_SNAKE_CASE: bool = False
    __SCREAMING_SNAKE_CASE: bool = False
    __SCREAMING_SNAKE_CASE: bool = False
    __SCREAMING_SNAKE_CASE: float = 0
    __SCREAMING_SNAKE_CASE: bool = True
    __SCREAMING_SNAKE_CASE: bool = False
    __SCREAMING_SNAKE_CASE: int = 128
    __SCREAMING_SNAKE_CASE: "TrunkConfig" = None

    def UpperCAmelCase__(self: Tuple):
        """Post-init: materialize `trunk` from None or a plain dict."""
        if self.trunk is None:
            lowercase_ = TrunkConfig()
        elif isinstance(self.trunk, UpperCamelCase__):
            lowercase_ = TrunkConfig(**self.trunk)

    def UpperCAmelCase__(self: Union[str, Any]):
        """Serialize to a dict, expanding the nested TrunkConfig."""
        lowercase_ = asdict(self)
        lowercase_ = self.trunk.to_dict()
        return output


@dataclass
class UpperCamelCase__:
    """Folding-trunk configuration; `structure_module` nests a
    StructureModuleConfig."""

    __SCREAMING_SNAKE_CASE: int = 48
    __SCREAMING_SNAKE_CASE: int = 1024
    __SCREAMING_SNAKE_CASE: int = 128
    __SCREAMING_SNAKE_CASE: int = 32
    __SCREAMING_SNAKE_CASE: int = 32
    __SCREAMING_SNAKE_CASE: int = 32
    __SCREAMING_SNAKE_CASE: float = 0
    __SCREAMING_SNAKE_CASE: float = 0
    __SCREAMING_SNAKE_CASE: bool = False
    __SCREAMING_SNAKE_CASE: int = 4
    __SCREAMING_SNAKE_CASE: Optional[int] = 128
    __SCREAMING_SNAKE_CASE: "StructureModuleConfig" = None

    def UpperCAmelCase__(self: Union[str, Any]):
        """Post-init: materialize `structure_module` and validate dimensions.

        NOTE(review): the `x % x != 0` self-comparisons below can never fire —
        presumably they were meant to check divisibility by the head widths;
        the same oddity exists in the upstream source. Left unchanged.
        """
        if self.structure_module is None:
            lowercase_ = StructureModuleConfig()
        elif isinstance(self.structure_module, UpperCamelCase__):
            lowercase_ = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''')
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''')
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''')
        # state dims must factor exactly into (num_heads x head_width)
        lowercase_ = self.sequence_state_dim // self.sequence_head_width
        lowercase_ = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''')
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''')

    def UpperCAmelCase__(self: Any):
        """Serialize to a dict, expanding the nested StructureModuleConfig."""
        lowercase_ = asdict(self)
        lowercase_ = self.structure_module.to_dict()
        return output


@dataclass
class UpperCamelCase__:
    """Structure-module (invariant point attention head) hyper-parameters."""

    __SCREAMING_SNAKE_CASE: int = 384
    __SCREAMING_SNAKE_CASE: int = 128
    __SCREAMING_SNAKE_CASE: int = 16
    __SCREAMING_SNAKE_CASE: int = 128
    __SCREAMING_SNAKE_CASE: int = 12
    __SCREAMING_SNAKE_CASE: int = 4
    __SCREAMING_SNAKE_CASE: int = 8
    __SCREAMING_SNAKE_CASE: float = 0.1
    __SCREAMING_SNAKE_CASE: int = 8
    __SCREAMING_SNAKE_CASE: int = 1
    __SCREAMING_SNAKE_CASE: int = 2
    __SCREAMING_SNAKE_CASE: int = 7
    __SCREAMING_SNAKE_CASE: int = 10
    __SCREAMING_SNAKE_CASE: float = 1e-8
    __SCREAMING_SNAKE_CASE: float = 1e5

    def UpperCAmelCase__(self: List[str]):
        """Serialize the dataclass to a plain dict."""
        return asdict(self)


def UpperCAmelCase_():
    """Return the default ESM-2 vocabulary (specials first, then residues)."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
650
1
class UpperCamelCase__ :
    """Weighted disjoint-set (union-find) with union by rank and path compression.

    Each element starts in its own set; ``set_counts[i]`` is the initial size of
    set ``i``.  ``max_set`` always holds the size of the largest set built so far.

    Fix vs. the original: both methods were named ``UpperCAmelCase__`` — the second
    ``def`` silently shadowed the first (so the merge operation was unreachable) and
    both bodies called a non-existent ``self.get_parent``.  The methods are restored
    to the names the bodies themselves reference: ``merge`` and ``get_parent``.
    """

    def __init__(self, set_counts: list) -> None:
        # Per-set element counts and the running maximum set size.
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        # Union-by-rank bookkeeping: every element starts as its own root.
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing ``src`` and ``dst``.

        Returns ``False`` when they already share a root, ``True`` otherwise.
        The absorbed root's count is zeroed and folded into the surviving root,
        and ``max_set`` is updated with the merged set's size.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # dst tree is at least as tall: attach the src tree under it.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            # src tree is taller: attach the dst tree under it.
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Return the root of ``disj_set``, compressing the path on the way up."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression: point directly at the root for future lookups.
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
650
# NOTE(review): whitespace-mangled copy of the `accelerate env` CLI command module
# (env_command_parser / env_command / main plus the license header and imports).
# Lines kept verbatim; only these review comments were added — the original
# multi-line formatting must be restored before this code can run.
#
# NOTE(review): `env_command_parser` reads a name `subparsers` while its parameter was
# obfuscated to `UpperCAmelCase__` — presumably the parameter was `subparsers=None`
# upstream; verify before reformatting. Likewise `os.path.isfile(UpperCAmelCase__ )`
# in `env_command` looks like it lost its `default_config_file` argument during the
# obfuscation — TODO confirm against upstream accelerate.
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def UpperCAmelCase_ ( UpperCAmelCase__=None ): if subparsers is not None: lowercase_ = subparsers.add_parser("""env""" ) else: lowercase_ = argparse.ArgumentParser("""Accelerate env command""" ) parser.add_argument( """--config_file""" , default=UpperCAmelCase__ , help="""The config file to use for the default values in the launching script.""" ) if subparsers is not None: parser.set_defaults(func=UpperCAmelCase__ ) return parser def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = torch.__version__ lowercase_ = torch.cuda.is_available() lowercase_ = is_xpu_available() lowercase_ = is_npu_available() lowercase_ = """Not found""" # Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCAmelCase__ ): lowercase_ = load_config_from_file(args.config_file ).to_dict() lowercase_ = { """`Accelerate` version""": version, """Platform""": platform.platform(), """Python version""": platform.python_version(), """Numpy version""": np.__version__, """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''', """PyTorch XPU available""": str(UpperCAmelCase__ ), """PyTorch NPU available""": str(UpperCAmelCase__ ), """System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''', } if pt_cuda_available: lowercase_ = torch.cuda.get_device_name() print("""\nCopy-and-paste the text below in your GitHub issue\n""" ) print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) ) print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" ) lowercase_ = ( """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else F'''\t{accelerate_config}''' ) print(UpperCAmelCase__ ) lowercase_ = accelerate_config return info def UpperCAmelCase_ ( ): lowercase_ = env_command_parser() lowercase_ = parser.parse_args() env_command(UpperCAmelCase__ ) return 0 if __name__ == "__main__": raise SystemExit(main())
650
1
# NOTE(review): whitespace-mangled copy of a Hugging Face `diffusers` test module for
# AudioLDMPipeline: a fast-test class (PipelineTesterMixin-based, tiny dummy UNet/VAE/
# CLAP-text-encoder/HiFi-GAN components) followed by an @slow integration-test class
# that loads the real `cvssp/audioldm` checkpoint. The test logic is tightly coupled
# to diffusers/transformers internals and to exact numeric slices, so the code is kept
# verbatim; only these review comments were added. The original multi-line formatting
# must be restored before this file can run.
#
# NOTE(review): the obfuscation collapsed distinct local names onto `lowercase_`
# (e.g. the `audio_a - audio_a` comparisons were presumably `audio_1 - audio_2`
# upstream) — restore real names when de-mangling; do not take the verbatim text
# below as semantically exact.
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = AudioLDMPipeline __SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_AUDIO_PARAMS __SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_TO_AUDIO_BATCH_PARAMS __SCREAMING_SNAKE_CASE : Tuple = frozenset( [ 'num_inference_steps', 'num_waveforms_per_prompt', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowercase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=UpperCamelCase__ , ) lowercase_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) lowercase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,
up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase_ = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , ) lowercase_ = ClapTextModelWithProjection(UpperCamelCase__ ) lowercase_ = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 ) lowercase_ = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=UpperCamelCase__ , ) lowercase_ = SpeechTaHifiGan(UpperCamelCase__ ) lowercase_ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """vocoder""": vocoder, } return components def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=0 ): '''simple docstring''' if str(UpperCamelCase__ ).startswith("""mps""" ): lowercase_ = torch.manual_seed(UpperCamelCase__ ) else: lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = { """prompt""": """A hammer hitting a wooden surface""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, } return inputs def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase_ = self.get_dummy_components() lowercase_ = AudioLDMPipeline(**UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = audioldm_pipe(**UpperCamelCase__ ) lowercase_ = output.audios[0] assert
audio.ndim == 1 assert len(UpperCamelCase__ ) == 256 lowercase_ = audio[:10] lowercase_ = np.array( [-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ = self.get_dummy_components() lowercase_ = AudioLDMPipeline(**UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = 3 * [inputs["""prompt"""]] # forward lowercase_ = audioldm_pipe(**UpperCamelCase__ ) lowercase_ = output.audios[0] lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = 3 * [inputs.pop("""prompt""" )] lowercase_ = audioldm_pipe.tokenizer( UpperCamelCase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""pt""" , ) lowercase_ = text_inputs["""input_ids"""].to(UpperCamelCase__ ) lowercase_ = audioldm_pipe.text_encoder( UpperCamelCase__ , ) lowercase_ = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state lowercase_ = F.normalize(UpperCamelCase__ , dim=-1 ) lowercase_ = prompt_embeds # forward lowercase_ = audioldm_pipe(**UpperCamelCase__ ) lowercase_ = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = self.get_dummy_components() lowercase_ = AudioLDMPipeline(**UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = 3 * ["""this is a negative prompt"""] lowercase_ = negative_prompt lowercase_ = 3 * [inputs["""prompt"""]] # forward lowercase_
= audioldm_pipe(**UpperCamelCase__ ) lowercase_ = output.audios[0] lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = 3 * [inputs.pop("""prompt""" )] lowercase_ = [] for p in [prompt, negative_prompt]: lowercase_ = audioldm_pipe.tokenizer( UpperCamelCase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""pt""" , ) lowercase_ = text_inputs["""input_ids"""].to(UpperCamelCase__ ) lowercase_ = audioldm_pipe.text_encoder( UpperCamelCase__ , ) lowercase_ = text_embeds.text_embeds # additional L_2 normalization over each hidden-state lowercase_ = F.normalize(UpperCamelCase__ , dim=-1 ) embeds.append(UpperCamelCase__ ) lowercase_ , lowercase_ = embeds # forward lowercase_ = audioldm_pipe(**UpperCamelCase__ ) lowercase_ = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase_ = self.get_dummy_components() lowercase_ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ ) lowercase_ = AudioLDMPipeline(**UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = """egg cracking""" lowercase_ = audioldm_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ ) lowercase_ = output.audios[0] assert audio.ndim == 1 assert len(UpperCamelCase__ ) == 256 lowercase_ = audio[:10] lowercase_ = np.array( [-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase_ = self.get_dummy_components() lowercase_ =
PNDMScheduler(skip_prk_steps=UpperCamelCase__ ) lowercase_ = AudioLDMPipeline(**UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = """A hammer hitting a wooden surface""" # test num_waveforms_per_prompt=1 (default) lowercase_ = audioldm_pipe(UpperCamelCase__ , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts lowercase_ = 2 lowercase_ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt lowercase_ = 2 lowercase_ = audioldm_pipe(UpperCamelCase__ , num_inference_steps=2 , num_waveforms_per_prompt=UpperCamelCase__ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts lowercase_ = 2 lowercase_ = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=UpperCamelCase__ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase_ = self.get_dummy_components() lowercase_ = AudioLDMPipeline(**UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = audioldm_pipe.vocoder.config.sampling_rate lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = audioldm_pipe(audio_length_in_s=0.016 , **UpperCamelCase__ ) lowercase_ = output.audios[0] assert audio.ndim == 1 assert len(UpperCamelCase__ ) / vocoder_sampling_rate == 0.016 lowercase_ = audioldm_pipe(audio_length_in_s=0.032 , **UpperCamelCase__ ) lowercase_ = output.audios[0] assert audio.ndim == 1 assert len(UpperCamelCase__ ) / vocoder_sampling_rate == 0.032 def UpperCAmelCase__ (
self : List[str] ): '''simple docstring''' lowercase_ = self.get_dummy_components() lowercase_ = AudioLDMPipeline(**UpperCamelCase__ ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = ["""hey"""] lowercase_ = audioldm_pipe(UpperCamelCase__ , num_inference_steps=1 ) lowercase_ = output.audios.shape assert audio_shape == (1, 256) lowercase_ = audioldm_pipe.vocoder.config config.model_in_dim *= 2 lowercase_ = SpeechTaHifiGan(UpperCamelCase__ ).to(UpperCamelCase__ ) lowercase_ = audioldm_pipe(UpperCamelCase__ , num_inference_steps=1 ) lowercase_ = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCamelCase__ ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=UpperCamelCase__ ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase__ ) @slow class UpperCamelCase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Any ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Dict="cpu" , UpperCamelCase__ : List[str]=torch.floataa , UpperCamelCase__ : str=0 ): '''simple docstring''' lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 8, 128, 16) ) lowercase_ = torch.from_numpy(UpperCamelCase__
).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) lowercase_ = { """prompt""": """A hammer hitting a wooden surface""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 2.5, } return inputs def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_inputs(UpperCamelCase__ ) lowercase_ = 25 lowercase_ = audioldm_pipe(**UpperCamelCase__ ).audios[0] assert audio.ndim == 1 assert len(UpperCamelCase__ ) == 81_920 lowercase_ = audio[77_230:77_240] lowercase_ = np.array( [-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] ) lowercase_ = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" ) lowercase_ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) lowercase_ = audioldm_pipe.to(UpperCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_inputs(UpperCamelCase__ ) lowercase_ = audioldm_pipe(**UpperCamelCase__ ).audios[0] assert audio.ndim == 1 assert len(UpperCamelCase__ ) == 81_920 lowercase_ = audio[27_780:27_790] lowercase_ = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] ) lowercase_ = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
650
# NOTE(review): whitespace-mangled copy of the Hugging Face `transformers` DeiT test
# module: DeiTModelTester (dummy-config fixture builder), the ModelTesterMixin-based
# DeiTModelTest class, the prepare_img() helper and the @slow integration-test class
# (real `facebook/deit-base-distilled-patch16-224` checkpoints). Deeply coupled to
# transformers test infrastructure; code kept verbatim, only these review comments
# added. The original multi-line formatting must be restored before this file can run.
#
# NOTE(review): class names were obfuscated to `UpperCamelCase__` and base classes to
# `__magic_name__`; several `get_values(UpperCamelCase__)` calls presumably referenced
# MODEL_MAPPING / MODEL_FOR_*_MAPPING constants upstream — verify when de-mangling.
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCamelCase__ : def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ): '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = image_size lowercase_ = patch_size lowercase_ = num_channels lowercase_ = is_training lowercase_ = use_labels lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads
lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = scope lowercase_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowercase_ = (image_size // patch_size) ** 2 lowercase_ = num_patches + 2 def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ): '''simple docstring''' lowercase_ = DeiTModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase_ = 1 lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ): '''simple docstring''' lowercase_ = self.type_sequence_label_size lowercase_ = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase_ = 1 lowercase_ = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = ( {
'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : List[Any] = False def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = DeiTModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCamelCase__ ) lowercase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ = [*signature.parameters.keys()] lowercase_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ): '''simple docstring''' lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' if not self.model_tester.is_training: return lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCamelCase__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowercase_ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowercase_ = model(**UpperCamelCase__ ).loss loss.backward() def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowercase_ = False lowercase_ = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowercase_ = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable() model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowercase_ = model(**UpperCamelCase__ ).loss loss.backward() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCamelCase__ ), *get_values(UpperCamelCase__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ): lowercase_ = problem_type["""title"""] lowercase_ = problem_type["""num_labels"""] lowercase_ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if problem_type["num_labels"] > 1: lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list: lowercase_ = model(**UpperCamelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def UpperCAmelCase__ ( self : int ): '''simple docstring''' for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def UpperCAmelCase_ ( ): lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( UpperCamelCase__ ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowercase_ = model(**UpperCamelCase__ ) # verify the logits lowercase_ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" ,
torch_dtype=torch.floataa , device_map="""auto""" ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ) lowercase_ = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase_ = model(UpperCamelCase__ )
650
1
def fibonacci(n):
    """Return the n-th Fibonacci number.

    Indexing convention kept from the original: fibonacci(2) == 1,
    fibonacci(3) == 1, fibonacci(4) == 2, ...  For n == 1 or any
    non-integer input the function returns 0 (defensive guard).
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        # Build the sequence iteratively; sequence[i] is the i-th term.
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci term with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1_000):
    """Project Euler 25: index of the first Fibonacci term to contain n digits.

    The mangled original bound its helpers to the name ``UpperCAmelCase_`` and
    its locals to ``lowercase_`` while calling ``fibonacci`` /
    ``fibonacci_digits_index`` / reading ``digits`` and ``index`` — every call
    raised NameError.  Fixed by restoring consistent names; logic unchanged.
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
650
# Lazy import surface for the ControlNet pipelines: when the required
# backends (PyTorch + transformers) are missing, dummy placeholder objects
# are exposed instead so that importing the package never hard-fails.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Backends unavailable: the dummy objects raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    # The Flax pipeline is only importable when JAX/Flax is installed.
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
650
1
import math


def res(x, y):
    """Return a comparable magnitude for x**y without computing the power.

    Uses the identity log10(x**y) == y * log10(x), so astronomically large
    powers can be compared cheaply.  The log identity cannot express the
    zero cases, so they are handled explicitly: 0**y == 0 (for y != 0) and
    x**0 == 1.

    Fixes in this revision: the original called the nonexistent
    ``math.logaa`` (mangled ``math.log10``), the def was named
    ``UpperCAmelCase_`` while the call sites used ``res``, and the main
    block assigned every value to ``a`` while reading ``xa``/``ya``/``resa``.
    """
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any (nonzero) number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    # One of x, y is zero and both zero-branches above return, so this is
    # unreachable; kept as a guard against future edits.
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two (base, power) pairs and report which power is larger,
    # comparing via base-10 logarithms.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    res1 = res(x1, y1)
    res2 = res(x2, y2)

    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
650
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# Path to a small local SentencePiece model used as the tokenizer fixture.
a = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
    # Test-suite for the XGLM tokenizers (slow SentencePiece + fast Rust
    # backend), driven through the shared TokenizerTesterMixin.
    # NOTE(review): identifiers in this file look machine-mangled — many
    # assignments bind `lowercase_` while later statements read names such as
    # `tokenizer` / `vocab_keys` / `rust_tokenizer`; verify against the
    # original upstream test file before relying on this code.
    __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
    __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
    __SCREAMING_SNAKE_CASE : List[Any] = True
    __SCREAMING_SNAKE_CASE : int = True

    def UpperCAmelCase__ ( self : List[Any] ):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCAmelCase__ ( self : Tuple ):
        '''simple docstring'''
        # "<pad>" must round-trip to id 1 in both directions.
        lowercase_ = """<pad>"""
        lowercase_ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )

    def UpperCAmelCase__ ( self : str ):
        '''simple docstring'''
        # First vocab entries are the special tokens; fixture vocab size is 1008.
        lowercase_ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 1_008 )

    def UpperCAmelCase__ ( self : int ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )

    def UpperCAmelCase__ ( self : Tuple ):
        '''simple docstring'''
        # End-to-end tokenize -> ids -> tokens round-trip, accents kept.
        lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )

        lowercase_ = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
        self.assertListEqual(
            UpperCamelCase__ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )

        # Ids outside the small fixture vocab decode back to "<unk>".
        lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )

    @cached_property
    def UpperCAmelCase__ ( self : str ):
        '''simple docstring'''
        # Full pretrained tokenizer; downloaded once per run (cached_property).
        return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )

    def UpperCAmelCase__ ( self : Dict ):
        '''simple docstring'''
        # A tokenizer built from a raw file must survive a pickle round-trip
        # even after the backing file has been deleted.
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(UpperCamelCase__ , f.name )
            lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ )
            lowercase_ = pickle.dumps(UpperCamelCase__ )
        pickle.loads(UpperCamelCase__ )

    def UpperCAmelCase__ ( self : str ):
        '''simple docstring'''
        # Slow and fast tokenizers must agree on tokens and on encoded ids.
        if not self.test_rust_tokenizer:
            return

        lowercase_ = self.get_tokenizer()
        lowercase_ = self.get_rust_tokenizer()

        lowercase_ = """I was born in 92000, and this is falsé."""

        lowercase_ = tokenizer.tokenize(UpperCamelCase__ )
        lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

        lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

        lowercase_ = self.get_rust_tokenizer()
        lowercase_ = tokenizer.encode(UpperCamelCase__ )
        lowercase_ = rust_tokenizer.encode(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @slow
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        '''simple docstring'''
        lowercase_ = """Hello World!"""
        lowercase_ = [2, 31_227, 4_447, 35]

        self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )

    @slow
    def UpperCAmelCase__ ( self : str ):
        '''simple docstring'''
        # Long text with unusual characters and out-of-vocabulary words.
        lowercase_ = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
        )
        # fmt: off
        lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )

    @slow
    def UpperCAmelCase__ ( self : Any ):
        '''simple docstring'''
        # Golden encodings for three reference sentences; attention masks are
        # all-ones of the same lengths (98, 35 and 14 tokens).
        # fmt: off
        lowercase_ = {
            """input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
            """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
650
1
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Create the "bertarized" (standalone fine-pruned) version of a model.

    Loads a fine-pruned checkpoint, applies the mask implied by the chosen
    pruning method to every prunable weight, and saves a self-contained
    ``pytorch_model.bin`` that no longer needs the ``*_mask_scores`` tensors.

    Fixes in this revision: the mangled original defined the function as
    ``UpperCAmelCase_`` while the script tail calls ``main(args)``, and every
    local was bound to ``lowercase_`` while later lines read ``args`` /
    ``pruned_model`` / ``scores`` — the script raised NameError.  Names are
    restored; the pruning logic itself is unchanged.

    Args:
        args: parsed argparse namespace with ``pruning_method``,
            ``threshold``, ``model_name_or_path`` and ``target_model_path``.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        # Embeddings, LayerNorms, poolers, heads and biases are never pruned:
        # copy them through unchanged.
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                # Mask derived from the weight magnitudes themselves.
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                # "<layer>.weight" -> companion "<layer>.mask_scores" tensor.
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        # Default output: sibling folder "bertarized_<source folder name>".
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )

    args = parser.parse_args()
    main(args)
650
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


# The slow (SentencePiece) tokenizer is only importable when sentencepiece
# is installed; otherwise the slow-class slot is left as None.
# NOTE(review): module-level names below are machine-mangled — every constant
# is bound to `a`, although the class body reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, etc.; verify against the upstream file.
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    a = None

a = logging.get_logger(__name__)

a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

a = {
    'vocab_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
    },
}

# TODO(PVP) - this should be removed in Transformers v5
a = {
    't5-small': 5_1_2,
    't5-base': 5_1_2,
    't5-large': 5_1_2,
    't5-3b': 5_1_2,
    't5-11b': 5_1_2,
}


class UpperCamelCase__ ( __magic_name__ ):
    # Fast (Rust-backed) T5 tokenizer.  Adds `extra_ids` sentinel tokens
    # ("<extra_id_0>" ...) used by T5's span-corruption objective.
    __SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
    __SCREAMING_SNAKE_CASE : Dict = TaTokenizer
    __SCREAMING_SNAKE_CASE : List[int] = []

    def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] , ):
        '''simple docstring'''
        # Auto-generate the sentinel tokens, or check that the user-supplied
        # additional_special_tokens contains exactly `extra_ids` of them.
        if extra_ids > 0 and additional_special_tokens is None:
            lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""" )

        super().__init__(
            UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )

        lowercase_ = vocab_file
        # Slow-tokenizer export is only possible when a vocab file is known.
        lowercase_ = False if not self.vocab_file else True
        lowercase_ = extra_ids

    @staticmethod
    def UpperCAmelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int ):
        '''simple docstring'''
        # Back-compat shim: warn when the deprecated hard-coded max length
        # would silently truncate inputs (to be removed in Transformers v5).
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            lowercase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )

        return max_model_length

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        '''simple docstring'''
        # Copy the SentencePiece vocab file into `save_directory` so a slow
        # tokenizer can later be rebuilt from it.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )

        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowercase_ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
            copyfile(self.vocab_file , UpperCamelCase__ )
            logger.info(F'''Copy vocab file to {out_vocab_file}''' )

        return (out_vocab_file,)

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        '''simple docstring'''
        # T5 format: X </s> (single) or A </s> B </s> (pair).
        lowercase_ = token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a
        else:
            lowercase_ = token_ids_a + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a

    def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        '''simple docstring'''
        # T5 does not use token type ids: return all zeros of the right length.
        lowercase_ = [self.eos_token_id]

        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]

    def UpperCAmelCase__ ( self : Dict ):
        '''simple docstring'''
        # All registered special tokens of the form "<extra_id_N>".
        return list(
            set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )

    def UpperCAmelCase__ ( self : str ):
        '''simple docstring'''
        # Ids corresponding to the sentinel tokens above.
        return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
650
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class UpperCamelCase__ ( unittest.TestCase ):
    # Configuration holder used to build MobileViT image processors in tests.
    # NOTE(review): identifiers are machine-mangled — every assignment binds
    # `lowercase_` while the dict below reads self.do_resize, self.size, etc.;
    # verify against the upstream test file.
    def __init__( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : str=18 , UpperCamelCase__ : int=30 , UpperCamelCase__ : Tuple=400 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[Any]=True , ):
        '''simple docstring'''
        lowercase_ = size if size is not None else {"""shortest_edge""": 20}
        lowercase_ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        lowercase_ = parent
        lowercase_ = batch_size
        lowercase_ = num_channels
        lowercase_ = image_size
        lowercase_ = min_resolution
        lowercase_ = max_resolution
        lowercase_ = do_resize
        lowercase_ = size
        lowercase_ = do_center_crop
        lowercase_ = crop_size
        lowercase_ = do_flip_channel_order

    def UpperCAmelCase__ ( self : Tuple ):
        '''simple docstring'''
        # kwargs dict consumed by MobileViTImageProcessor(**...).
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
    # Test-suite for MobileViTImageProcessor: checks its attributes, dict
    # round-trip, and output shapes for PIL / numpy / torch inputs.
    __SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self : int ):
        '''simple docstring'''
        lowercase_ = MobileViTImageProcessingTester(self )

    @property
    def UpperCAmelCase__ ( self : Dict ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self : List[Any] ):
        '''simple docstring'''
        # Processor must expose the expected configuration attributes.
        lowercase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """do_center_crop""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """center_crop""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """do_flip_channel_order""" ) )

    def UpperCAmelCase__ ( self : Dict ):
        '''simple docstring'''
        # from_dict must honour defaults and accept overrides for size/crop.
        lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )

        lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        '''simple docstring'''
        pass

    def UpperCAmelCase__ ( self : int ):
        '''simple docstring'''
        lowercase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image )

        # Test not batched input
        lowercase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

        # Test batched
        lowercase_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def UpperCAmelCase__ ( self : Tuple ):
        '''simple docstring'''
        lowercase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray )

        # Test not batched input
        lowercase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

        # Test batched
        lowercase_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def UpperCAmelCase__ ( self : List[Any] ):
        '''simple docstring'''
        lowercase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor )

        # Test not batched input
        lowercase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

        # Test batched
        lowercase_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
650
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline __SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} __SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} __SCREAMING_SNAKE_CASE : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __SCREAMING_SNAKE_CASE : Any = frozenset([] ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , ) lowercase_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) lowercase_ = DDIMInverseScheduler( beta_start=0.00_085 
, beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , ) torch.manual_seed(0 ) lowercase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowercase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowercase_ = CLIPTextModel(UpperCamelCase__ ) lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase_ = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ): '''simple docstring''' lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) if str(UpperCamelCase__ ).startswith("""mps""" ): lowercase_ = torch.manual_seed(UpperCamelCase__ ) else: lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ): '''simple 
docstring''' lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ) if str(UpperCamelCase__ ).startswith("""mps""" ): lowercase_ = torch.manual_seed(UpperCamelCase__ ) else: lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ): '''simple docstring''' lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ) if str(UpperCamelCase__ ).startswith("""mps""" ): lowercase_ = torch.manual_seed(UpperCamelCase__ ) else: lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowercase_ = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : int ): '''simple docstring''' if not hasattr(self.pipeline_class , """_optional_components""" ): return lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) # set all optional components to None and update pipeline config accordingly for optional_component in 
pipe._optional_components: setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = pipe(**UpperCamelCase__ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCamelCase__ ) lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ ) pipe_loaded.to(UpperCamelCase__ ) pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ ) for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) lowercase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowercase_ = pipe_loaded(**UpperCamelCase__ )[0] lowercase_ = np.abs(output - output_loaded ).max() self.assertLess(UpperCamelCase__ , 1e-4 ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = """cpu""" lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ ) lowercase_ = pipe.generate_mask(**UpperCamelCase__ ) lowercase_ = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) lowercase_ = np.array([0] * 9 ) lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = """cpu""" lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ ) lowercase_ = pipe.invert(**UpperCamelCase__ ).images lowercase_ = image[0, -1, -3:, -3:] 
self.assertEqual(image.shape , (2, 32, 32, 3) ) lowercase_ = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1e-3 ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = """cpu""" lowercase_ = self.get_dummy_components() lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""} lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ ) lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ ) lowercase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ ) lowercase_ = pipe.invert(**UpperCamelCase__ ).images lowercase_ = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowercase_ = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCAmelCase__ ( cls : Dict ): '''simple docstring''' lowercase_ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) ) lowercase_ = raw_image def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = torch.manual_seed(0 ) lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained( 
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa ) lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config ) lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = """a bowl of fruit""" lowercase_ = """a bowl of pears""" lowercase_ = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , ) lowercase_ = pipe.invert( prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents lowercase_ = pipe( prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] lowercase_ = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = torch.manual_seed(0 ) lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa ) lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowercase_ = """a bowl of fruit""" lowercase_ = """a bowl of pears""" lowercase_ = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , ) lowercase_ = pipe.invert( prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , 
generator=UpperCamelCase__ , num_inference_steps=25 , ).latents lowercase_ = pipe( prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] lowercase_ = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
650
1
# Backwards-compatibility shim: the standalone `inpainting.py` example script was
# superseded by the pipeline class shipped inside `diffusers` itself.
import warnings

# Re-exported (self-aliased) so old `from inpainting import StableDiffusionInpaintPipeline`
# style imports keep working; `noqa F401` silences the unused-import lint.
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401

# Emitted once at import time to steer users toward the supported import path.
warnings.warn(
    'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
    ' StableDiffusionInpaintPipeline` instead.'
)
650
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


# Module logger (variable name mangled upstream; kept for compatibility).
a = logging.get_logger(__name__)


class UpperCamelCase__(BaseImageProcessor):
    """Image processor that rescales pixel values and symmetrically pads images
    so both spatial dimensions become multiples of ``pad_size``.

    Fixes over the previous revision: constructor arguments are stored on
    ``self`` (they were assigned to throwaway locals and never retained), the
    base class is the imported ``BaseImageProcessor`` (it was an undefined
    name), and the methods carry the names (``rescale``/``pad``/``preprocess``)
    that ``preprocess`` itself calls via ``self.rescale``/``self.pad``.
    """

    # Keys produced in the BatchFeature below.
    # NOTE(review): upstream convention names this attribute `model_input_names` —
    # the original attribute name was mangled; confirm against the base class.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 maps uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetric-pad bottom/right so height and width are multiples of ``size``.

        NOTE(review): when a dimension is already a multiple of ``size`` this
        still adds a full extra ``size`` of padding ((h // size + 1) * size) —
        kept as-is for compatibility with existing checkpoints.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Prepare one image or a batch: to-numpy -> optional rescale ->
        optional pad -> channel layout, returned as a ``BatchFeature``.
        """
        # Per-call overrides fall back to the instance defaults set in __init__.
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
650
1
def UpperCAmelCase_(edge):
    """Return the surface area of a regular dodecahedron with side length ``edge``.

    A = 3 * sqrt(25 + 10*sqrt(5)) * edge**2

    NOTE(review): this definition is shadowed by the volume function of the
    same (mangled) name below; the originals were presumably named
    ``dodecahedron_surface_area`` / ``dodecahedron_volume``. Names kept to
    preserve the module's importable interface.

    :raises ValueError: if ``edge`` is not a positive int/float.
    """
    # Type-check first: comparing a non-number with 0 would raise TypeError,
    # and the old `isinstance(edge, edge)` call was itself a TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def UpperCAmelCase_(edge):  # noqa: F811 -- shadows the surface-area function above (mangled names)
    """Return the volume of a regular dodecahedron with side length ``edge``.

    V = (15 + 7*sqrt(5)) / 4 * edge**3

    :raises ValueError: if ``edge`` is not a positive int/float.
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
def UpperCAmelCase_(number):
    """Return the 1-indexed position of the highest set bit of ``number``
    (equivalently, its bit length); 0 for an input of 0.

    :raises TypeError: if ``number`` is not an int.
    :raises ValueError: if ``number`` is negative. (Previously a negative
        input looped forever: an arithmetic right shift never drives a
        negative int to 0.)
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a non-negative integer")
    position = 0
    # Shift out one bit per iteration; the count is the MSB's position.
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
650
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UpperCamelCase__(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens with learned (frozen) absolute
    position embeddings; returns hidden states plus the pass-through mask.

    Fixes over the previous revision: submodules are stored on ``self`` (they
    were assigned to throwaway locals), the undefined ``__magic_name__`` bases
    are the three imported mixins, the forward method carries the ``forward``
    name nn.Module dispatch requires, and the digit-mangled import path
    ``models.ta.modeling_ta`` is ``models.t5.modeling_t5`` again.
    NOTE(review): parameter names/order restored from the upstream
    SpectrogramNotesEncoder — confirm against saved configs before shipping.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Position table is looked up but not trained.
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # Broadcast the padding mask to the additive attention-bias form the
        # T5 blocks consume (inverted: padded positions get large negatives).
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
650
1
import unittest

from transformers import DonutProcessor

# Pretrained checkpoint used by the round-trip test (variable name mangled upstream).
a = 'naver-clova-ix/donut-base'


class UpperCamelCase__(unittest.TestCase):
    """Round-trip check for DonutProcessor's token-sequence -> JSON conversion.

    Fixes over the previous revision: `setUp` and `test_token2json` carry the
    names unittest requires (they had been mangled, so the fixture never ran
    and the test was never discovered), the processor is stored on
    `self.processor` (it was assigned to a discarded local), and the
    digit-mangled `tokenajson` call is `token2json` again.
    """

    def setUp(self):
        # Downloads/loads the pretrained processor; requires network or a local cache.
        self.processor = DonutProcessor.from_pretrained(a)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
650
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar('T')


class LRUCache(Generic[T]):
    """Least-recently-used cache over hashable items.

    Fixes over the previous revision: the class and its methods carry the
    names the demo code below actually calls (``LRUCache``, ``refer``,
    ``display``), state is stored on ``self`` (it had been assigned to
    discarded locals), capacity is kept per instance so two caches don't
    interfere, and eviction removes the *evicted* key from the reference set
    rather than the key being inserted.
    """

    dq_store: deque[T]  # most-recently-used item at the left end
    key_reference: set[T]  # O(1) membership mirror of dq_store
    _MAX_CAPACITY: int = 10  # class-level default, overridden per instance

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            # n == 0 means "effectively unbounded".
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Mark ``x`` as most recently used, evicting the LRU item when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Already cached: pull it out so it can move to the front.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cache contents, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
650
1
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of array[left .. right] (inclusive); return index or -1.

    (Fix: the bound is now inclusive — the exclusive `range(left, right)`
    silently skipped the last element of every window.)
    """
    for i in range(left, right + 1):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search of a sorted ``array``; return index or -1.

    Fixes over the previous revision: third-points are computed from the
    current window's offsets (the old absolute ``(left + right) // 3 + 1``
    form indexed outside the window once ``left`` moved, raising IndexError
    on arrays longer than ``precision``), and bounds are inclusive
    throughout so the last element is reachable.
    """
    left = 0
    right = len(array) - 1
    while left <= right:
        # Small windows fall back to a linear scan.
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of sorted array[left .. right] (inclusive);
    return the index of ``target`` or -1.
    """
    if left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        if array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f'Iterative search: {target} found at positions: {result1}')
        print(f'Recursive search: {target} found at positions: {result2}')
    else:
        print('Not found')
650
def UpperCAmelCase_(input_str: str) -> str:
    """Return ``input_str`` with its whitespace-separated words in reverse order.

    Runs of whitespace collapse to single spaces and leading/trailing
    whitespace is dropped (behaviour of ``str.split()`` with no argument).
    (Fix: the parameter had been renamed without updating the body, so the
    function raised NameError on every call.)
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
1
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
650
def solution(limit: int = 28123) -> int:
    """Project Euler 23: sum of all positive integers <= ``limit`` that cannot
    be written as the sum of two abundant numbers.

    Fixes over the previous revision: the sieve list and accumulators are
    bound to the names the body reads (``sum_divs``/``abundants``/``res`` had
    been assigned to a discarded local), and the function carries the
    ``solution`` name the ``__main__`` guard below actually calls.
    """
    # sum_divs[m] accumulates the proper-divisor sum of m, sieve style:
    # every (i, k) pair with i < k and i*k <= limit contributes i + k to i*k,
    # and i*i contributes a single i; the initial 1 covers the divisor 1.
    # (sum_divs[1] is nominally 1 rather than 0, but 1 can never be abundant,
    # so it is harmless.)
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        # abundants only holds values <= n at this point, which is exactly
        # what a decomposition n = a + (n - a) can use.
        if not any((n - a) in abundants for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
650
1
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Fix: the obfuscated original bound both the logger and the checkpoint map to
# the single name `a`, so `logger` used below was undefined.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
a = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}


class BlipTextConfig(PretrainedConfig):
    """Configuration for the BLIP text encoder/decoder."""

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this text sub-config from a checkpoint; unwraps a composite BlipConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    """Configuration for the BLIP vision (ViT-style) encoder."""

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this vision sub-config from a checkpoint; unwraps a composite BlipConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    """Composite config wrapping a text and a vision sub-config."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        # The text model cross-attends over vision features of this width.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias: the obfuscated original left the last class bound
# to this name.
UpperCamelCase__ = BlipConfig
650
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    """Builds small random configs/inputs shared by the OpenLlama model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE(review): the obfuscated original clobbered all of these mixin
    # attributes into one name and used undefined base classes; restored here.
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
650
1
import importlib
import os

import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry

from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem

from .utils import require_lza, require_zstandard


def test_mockfs(mockfs):
    """The mockfs fixture registers a 'mock' protocol alongside the built-ins."""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    """Without the fixture the 'mock' protocol must not leak into the registry."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    # A local path is returned unchanged.
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Optional compression backend missing: skip with the backend's own reason.
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    # The filesystem exposes the archive member without its compression suffix.
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
650
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # Fix: the obfuscated original bound this flag (and the names below) to `a`.
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

# HF Hub repo hosting the fallback font used by `render_text`.
DEFAULT_FONT_PATH = "ybelkada/fonts"
a = DEFAULT_FONT_PATH  # backward-compatible alias for the obfuscated name


def _check_torch_version():
    """Raise when the installed torch is too old for this processor."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Cut an image tensor into non-overlapping patches.

    Returns shape (1, rows, columns, channels * patch_height * patch_width).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
):
    """Render *text* onto a new PIL image, wrapping lines at 80 characters."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # Fall back to the hosted Arial font.
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image, header: str, **kwargs):
    """Render *header* as text and paste it above *image*; returns a numpy array."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image


class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor producing Pix2Struct 'flattened patches' + attention mask."""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image, max_patches: int, patch_size: dict, **kwargs):
        """Resize so at most `max_patches` patches fit, then flatten patches and
        prepend 1-based (row, col) ids as the first two features."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the rescaled image yields at most `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """Per-image standardization: (x - mean(x)) / adjusted_stddev(x)."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        # Guard against a zero std on constant images.
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Full pipeline: (optional VQA header render) -> normalize -> flatten patches."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy (padding rows are all-zero)
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs


# Backward-compatible alias for the obfuscated class name.
UpperCamelCase__ = Pix2StructImageProcessor
650
1
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def UpperCAmelCase_(UpperCAmelCase__):
    """Factory wired into argparse via ``set_defaults(func=...)``; the parsed args are unused."""
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    """``diffusers-cli env``: collect and print environment info for bug reports.

    NOTE(review): in the obfuscated original the class name, base class,
    method names and every local were placeholders (`parser`, `download_parser`,
    `info` and the version variables were all undefined, and the three methods
    shared one name so only the last survived).  Names are restored here from
    the references the code itself makes (``return EnvironmentCommand()``,
    ``self.format_dict``) and the CLI-command convention.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the ``env`` subcommand on the CLI subparsers object."""
        download_parser = parser.add_parser("env")
        # Route `diffusers-cli env` to the factory above.
        download_parser.set_defaults(func=UpperCAmelCase_)

    def run(self):
        """Gather version info of installed libraries, print it, and return the dict."""
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render *d* as a bulleted ``- key: value`` list ending with a newline."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
650
import cva import numpy as np class UpperCamelCase__ : def __init__( self : List[str] , UpperCamelCase__ : float , UpperCamelCase__ : int ): '''simple docstring''' if k in (0.04, 0.06): lowercase_ = k lowercase_ = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Optional[int] ): '''simple docstring''' return str(self.k ) def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = cva.imread(UpperCamelCase__ , 0 ) lowercase_ , lowercase_ = img.shape lowercase_ = [] lowercase_ = img.copy() lowercase_ = cva.cvtColor(UpperCamelCase__ , cva.COLOR_GRAY2RGB ) lowercase_ , lowercase_ = np.gradient(UpperCamelCase__ ) lowercase_ = dx**2 lowercase_ = dy**2 lowercase_ = dx * dy lowercase_ = 0.04 lowercase_ = self.window_size // 2 for y in range(UpperCamelCase__ , h - offset ): for x in range(UpperCamelCase__ , w - offset ): lowercase_ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = (wxx * wyy) - (wxy**2) lowercase_ = wxx + wyy lowercase_ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": a = HarrisCorner(0.04, 3) a , a = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
650
1
# Algorithm for the pigeonhole sorting def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = min(UpperCAmelCase__ ) # min() finds the minimum value lowercase_ = max(UpperCAmelCase__ ) # max() finds the maximum value lowercase_ = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size lowercase_ = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. lowercase_ = 0 for count in range(UpperCAmelCase__ ): while holes[count] > 0: holes[count] -= 1 lowercase_ = count + min_val i += 1 def UpperCAmelCase_ ( ): lowercase_ = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(UpperCAmelCase__ ) print("""Sorted order is:""" , """ """.join(UpperCAmelCase__ ) ) if __name__ == "__main__": main()
650
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

# Pillow renamed the resampling constants to the Resampling enum in 9.1.0;
# pick whichever spelling this Pillow version supports.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    a = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    a = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def UpperCAmelCase_(UpperCAmelCase__):
    """Convert a torch image batch in [-1, 1] (NCHW) to a list of PIL images.

    Denormalizes to [0, 1], moves to CPU/NHWC, then delegates to numpy_to_pil.
    """
    images = (UpperCAmelCase__ / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(images)


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC floats in [0, 1]) to a list of PIL images.

    BUG FIX(review): in the original both functions shared one obfuscated name,
    so this definition shadowed the one above and the ``numpy_to_pil`` it calls
    was undefined; this def is renamed to match that call site.
    """
    # Promote a single HWC image to a batch of one.
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 2_5_5).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# NOTE(review): this chunk is name-obfuscated: every assignment target is the
# placeholder `lowercase_`, class attributes share one name, and the mixin
# bases are the undefined `__magic_name__`.  Later references (`unet`,
# `sd_pipe`, `pipe`, `expected_image`, ...) therefore do not resolve as
# written.  Code is kept byte-identical; comments only describe intent.
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
    # Fast (CPU, tiny-model) tests for StableDiffusionInpaintPipeline.
    __SCREAMING_SNAKE_CASE : int = StableDiffusionInpaintPipeline
    __SCREAMING_SNAKE_CASE : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    __SCREAMING_SNAKE_CASE : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    __SCREAMING_SNAKE_CASE : List[str] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __SCREAMING_SNAKE_CASE : Optional[Any] = frozenset([] )

    def UpperCAmelCase__ ( self : Any ):
        """Build tiny UNet/scheduler/VAE/CLIP components for fast CPU tests."""
        torch.manual_seed(0 )
        lowercase_ = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=9 ,  # 4 latent + 4 masked-image latent + 1 mask channel
            out_channels=4 ,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,
            cross_attention_dim=32 ,
            attention_head_dim=(2, 4) ,
            use_linear_projection=UpperCamelCase__ ,
        )
        lowercase_ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
        torch.manual_seed(0 )
        lowercase_ = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,
            latent_channels=4 ,
            sample_size=128 ,
        )
        torch.manual_seed(0 )
        lowercase_ = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1e-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_000 ,
            hidden_act="""gelu""" ,
            projection_dim=512 ,
        )
        lowercase_ = CLIPTextModel(UpperCamelCase__ )
        lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        lowercase_ = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str=0 ):
        """Build deterministic dummy inputs (init image, mask, generator, prompt)."""
        lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) )
        lowercase_ = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
        if str(UpperCamelCase__ ).startswith("""mps""" ):
            # MPS does not support device-bound generators.
            lowercase_ = torch.manual_seed(UpperCamelCase__ )
        else:
            lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        lowercase_ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": init_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCAmelCase__ ( self : List[Any] ):
        """End-to-end fast test: run on CPU and compare a 3x3 output slice."""
        lowercase_ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowercase_ = self.get_dummy_components()
        lowercase_ = StableDiffusionInpaintPipeline(**UpperCamelCase__ )
        lowercase_ = sd_pipe.to(UpperCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
        lowercase_ = sd_pipe(**UpperCamelCase__ ).images
        lowercase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase_ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def UpperCAmelCase__ ( self : Tuple ):
        """Relax the batched-vs-single-inference tolerance for this pipeline."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    # Slow GPU integration tests against the real stabilityai checkpoints.

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase__ ( self : int ):
        """fp32 inpainting compared against a stored reference image."""
        lowercase_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        lowercase_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        lowercase_ = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""" )
        lowercase_ = """stabilityai/stable-diffusion-2-inpainting"""
        lowercase_ = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase__ , safety_checker=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing()
        lowercase_ = """Face of a yellow cat, high resolution, sitting on a park bench"""
        lowercase_ = torch.manual_seed(0 )
        lowercase_ = pipe(
            prompt=UpperCamelCase__ ,
            image=UpperCamelCase__ ,
            mask_image=UpperCamelCase__ ,
            generator=UpperCamelCase__ ,
            output_type="""np""" ,
        )
        lowercase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def UpperCAmelCase__ ( self : int ):
        """fp16 inpainting compared against a reference image (looser tolerance)."""
        lowercase_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        lowercase_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        lowercase_ = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
        lowercase_ = """stabilityai/stable-diffusion-2-inpainting"""
        lowercase_ = StableDiffusionInpaintPipeline.from_pretrained(
            UpperCamelCase__ ,
            torch_dtype=torch.floataa ,
            safety_checker=UpperCamelCase__ ,
        )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing()
        lowercase_ = """Face of a yellow cat, high resolution, sitting on a park bench"""
        lowercase_ = torch.manual_seed(0 )
        lowercase_ = pipe(
            prompt=UpperCamelCase__ ,
            image=UpperCamelCase__ ,
            mask_image=UpperCamelCase__ ,
            generator=UpperCamelCase__ ,
            output_type="""np""" ,
        )
        lowercase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def UpperCAmelCase__ ( self : Tuple ):
        """Sequential CPU offload + attention slicing must keep peak GPU memory low."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowercase_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        lowercase_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        lowercase_ = """stabilityai/stable-diffusion-2-inpainting"""
        lowercase_ = PNDMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
        lowercase_ = StableDiffusionInpaintPipeline.from_pretrained(
            UpperCamelCase__ ,
            safety_checker=UpperCamelCase__ ,
            scheduler=UpperCamelCase__ ,
            torch_dtype=torch.floataa ,
        )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        lowercase_ = """Face of a yellow cat, high resolution, sitting on a park bench"""
        lowercase_ = torch.manual_seed(0 )
        lowercase_ = pipe(
            prompt=UpperCamelCase__ ,
            image=UpperCamelCase__ ,
            mask_image=UpperCamelCase__ ,
            generator=UpperCamelCase__ ,
            num_inference_steps=2 ,
            output_type="""np""" ,
        )
        lowercase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
650
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


# NOTE(review): name-obfuscated chunk -- every assignment target is the
# placeholder `lowercase_` and the base class is the undefined
# `__magic_name__`, so later references (`config`, `scheduler`, `model`, ...)
# do not resolve as written.  Code kept byte-identical; comments describe
# the apparent intent only.
class UpperCamelCase__ ( __magic_name__ ):
    # Scheduler test-suite entry for UnCLIPScheduler.
    __SCREAMING_SNAKE_CASE : str = (UnCLIPScheduler,)

    def UpperCAmelCase__ ( self : int , **UpperCamelCase__ : int ):
        """Return the default scheduler config, with any kwargs overriding it."""
        lowercase_ = {
            """num_train_timesteps""": 1_000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }
        config.update(**UpperCamelCase__ )
        return config

    def UpperCAmelCase__ ( self : Tuple ):
        """Config sweep over num_train_timesteps."""
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Config sweep over the two supported variance types."""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : List[str] ):
        """Config sweep over sample clipping on/off."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Any ):
        """Config sweep over the clip range."""
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Tuple ):
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[int] ):
        """Forward sweep over (time_step, prev_timestep) pairs; prev must precede t."""
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Pin _get_variance values for the fixed_small_log variance type."""
        lowercase_ = self.scheduler_classes[0]
        lowercase_ = self.get_scheduler_config(variance_type="""fixed_small_log""" )
        lowercase_ = scheduler_class(**UpperCamelCase__ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5

    def UpperCAmelCase__ ( self : str ):
        """Pin _get_variance values for the learned_range variance type."""
        lowercase_ = self.scheduler_classes[0]
        lowercase_ = self.get_scheduler_config(variance_type="""learned_range""" )
        lowercase_ = scheduler_class(**UpperCamelCase__ )
        lowercase_ = 0.5
        assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1e-5

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Full denoising loop over the training schedule; pin sum/mean of output."""
        lowercase_ = self.scheduler_classes[0]
        lowercase_ = self.get_scheduler_config()
        lowercase_ = scheduler_class(**UpperCamelCase__ )
        lowercase_ = scheduler.timesteps
        lowercase_ = self.dummy_model()
        lowercase_ = self.dummy_sample_deter
        lowercase_ = torch.manual_seed(0 )
        for i, t in enumerate(UpperCamelCase__ ):
            # 1. predict noise residual
            lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
            # 2. predict previous mean of sample x_t-1
            lowercase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
            lowercase_ = pred_prev_sample
        lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
        lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
        assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3

    def UpperCAmelCase__ ( self : int ):
        """Denoising loop with a reduced 25-step inference schedule; pin sum/mean."""
        lowercase_ = self.scheduler_classes[0]
        lowercase_ = self.get_scheduler_config()
        lowercase_ = scheduler_class(**UpperCamelCase__ )
        scheduler.set_timesteps(25 )
        lowercase_ = scheduler.timesteps
        lowercase_ = self.dummy_model()
        lowercase_ = self.dummy_sample_deter
        lowercase_ = torch.manual_seed(0 )
        for i, t in enumerate(UpperCamelCase__ ):
            # 1. predict noise residual
            lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
            if i + 1 == timesteps.shape[0]:
                lowercase_ = None
            else:
                lowercase_ = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            lowercase_ = scheduler.step(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
            lowercase_ = pred_prev_sample
        lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
        lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
        assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3

    def UpperCAmelCase__ ( self : List[str] ):
        """Intentionally skipped common test (not applicable to UnCLIP)."""
        pass

    def UpperCAmelCase__ ( self : int ):
        """Intentionally skipped common test (not applicable to UnCLIP)."""
        pass
650
1
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    """Track the peak CPU RSS of the current process with a busy-polling thread.

    NOTE(review): in the obfuscated original all methods shared one placeholder
    name (so only the last survived) and the attributes/keys below were never
    written; names are restored from the references the code itself makes
    (``self.peak_monitor``, ``cpu_peak_tracker.start()/.stop()``,
    ``a = PeakCPUMemory()``, ``measures[str(i)]``, ``measures[f"{i}-peak"]``).
    """

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Thread body: poll the process RSS as fast as possible, recording the max."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        """Start the monitoring thread (daemon, so it never blocks interpreter exit)."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop the monitoring thread and return the recorded peak RSS in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()
# Keep the original module-level (obfuscated) alias alive for any caller using it.
a = cpu_peak_tracker


def start_measure():
    """Snapshot time, CPU RSS and per-GPU allocated bytes before a measured section.

    Returns a dict with keys "time", "cpu" and str(gpu_index) -- exactly the
    keys end_measure reads back.  Also starts the CPU peak tracker and resets
    CUDA peak-memory statistics.
    """
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    """Diff current state against *start_measures*: time in seconds, memory in MiB."""
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (2**20 bytes per MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    """Pretty-print a measurement dict produced by end_measure."""
    print(f"{description}:")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")


# Preserve the original module's final binding of the obfuscated function name
# (the three defs above all shared it, so it ended up pointing at the logger).
UpperCAmelCase_ = log_measures
650
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


# NOTE(review): this chunk is name-obfuscated.  The module-level name `a` is
# rebound three times (logger, model-type list, model-type tuple), every local
# is the placeholder `lowercase_`, defaults are the undefined `__magic_name__`,
# all dataclass fields share one name, and the final guard calls an undefined
# `main`.  Code is kept byte-identical; comments describe apparent intent only.
a = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')

a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class UpperCamelCase__ :
    # Data/training arguments: dataset selection, train/val dirs, masking setup.
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the training data.'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the validation data.'} )
    __SCREAMING_SNAKE_CASE : Optional[float] = field(
        default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
    __SCREAMING_SNAKE_CASE : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
    __SCREAMING_SNAKE_CASE : float = field(
        default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def UpperCAmelCase__ ( self : Tuple ):
        """Post-init hook: collect train/validation dirs into a data_files mapping."""
        lowercase_ = {}
        if self.train_dir is not None:
            lowercase_ = self.train_dir
        if self.validation_dir is not None:
            lowercase_ = self.validation_dir
        lowercase_ = data_files if data_files else None


@dataclass
class UpperCamelCase__ :
    # Model arguments: checkpoint/config selection and SimMIM geometry overrides.
    __SCREAMING_SNAKE_CASE : str = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                'Don\'t set if you want to train a model from scratch.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ ,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__magic_name__ )} , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__magic_name__ ,
        metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
    __SCREAMING_SNAKE_CASE : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    __SCREAMING_SNAKE_CASE : str = field(default=__magic_name__ , metadata={'help': 'Name or path of preprocessor config.'} )
    __SCREAMING_SNAKE_CASE : bool = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        } , )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=__magic_name__ , metadata={'help': 'Stride to use for the encoder.'} , )


class UpperCamelCase__ :
    """SimMIM-style random boolean mask generator at model-patch resolution."""

    def __init__( self : Dict , UpperCamelCase__ : List[Any]=192 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : str=0.6 ):
        """Validate divisibility of image/mask/model patch sizes and precompute counts."""
        lowercase_ = input_size
        lowercase_ = mask_patch_size
        lowercase_ = model_patch_size
        lowercase_ = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )
        lowercase_ = self.input_size // self.mask_patch_size
        lowercase_ = self.mask_patch_size // self.model_patch_size
        lowercase_ = self.rand_size**2
        lowercase_ = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__( self : int ):
        """Sample a fresh random mask and upscale it to model-patch resolution."""
        lowercase_ = np.random.permutation(self.token_count )[: self.mask_count]
        lowercase_ = np.zeros(self.token_count , dtype=UpperCamelCase__ )
        lowercase_ = 1
        lowercase_ = mask.reshape((self.rand_size, self.rand_size) )
        lowercase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )


def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Collate: stack pixel_values and masks into the model's expected batch dict."""
    lowercase_ = torch.stack([example["""pixel_values"""] for example in examples] )
    lowercase_ = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def UpperCAmelCase_ ( ):
    """Entry point: parse args, build dataset/config/model, train and evaluate."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,
        datefmt="""%m/%d/%Y %H:%M:%S""" ,
        handlers=[logging.StreamHandler(sys.stdout )] ,
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    lowercase_ = training_args.get_process_log_level()
    logger.setLevel(UpperCAmelCase__ )
    transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )

    # Detecting last checkpoint.
    lowercase_ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase_ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Initialize our dataset.
    lowercase_ = load_dataset(
        data_args.dataset_name ,
        data_args.dataset_config_name ,
        data_files=data_args.data_files ,
        cache_dir=model_args.cache_dir ,
        use_auth_token=True if model_args.use_auth_token else None ,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
        lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split )
        lowercase_ = split["""train"""]
        lowercase_ = split["""test"""]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase_ = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        lowercase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCAmelCase__ )
    elif model_args.model_name_or_path:
        lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
    else:
        lowercase_ = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'''New config: {config}''' )

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(UpperCAmelCase__ , """decoder_type""" ):
        lowercase_ = """simmim"""

    # adapt config
    lowercase_ = model_args.image_size if model_args.image_size is not None else config.image_size
    lowercase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    lowercase_ = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            """image_size""": model_args.image_size,
            """patch_size""": model_args.patch_size,
            """encoder_stride""": model_args.encoder_stride,
        } )

    # create image processor
    if model_args.image_processor_name:
        lowercase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ )
    elif model_args.model_name_or_path:
        lowercase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
    else:
        lowercase_ = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        lowercase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        lowercase_ = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path ,
            from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,
            config=UpperCAmelCase__ ,
            cache_dir=model_args.cache_dir ,
            revision=model_args.model_revision ,
            use_auth_token=True if model_args.use_auth_token else None ,
        )
    else:
        logger.info("""Training new model from scratch""" )
        lowercase_ = AutoModelForMaskedImageModeling.from_config(UpperCAmelCase__ )

    if training_args.do_train:
        lowercase_ = ds["""train"""].column_names
    else:
        lowercase_ = ds["""validation"""].column_names

    if data_args.image_column_name is not None:
        lowercase_ = data_args.image_column_name
    elif "image" in column_names:
        lowercase_ = """image"""
    elif "img" in column_names:
        lowercase_ = """img"""
    else:
        lowercase_ = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    lowercase_ = Compose(
        [
            Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )

    # create mask generator
    lowercase_ = MaskGenerator(
        input_size=model_args.image_size ,
        mask_patch_size=data_args.mask_patch_size ,
        model_patch_size=model_args.patch_size ,
        mask_ratio=data_args.mask_ratio ,
    )

    def preprocess_images(UpperCAmelCase__ ):
        # Apply the augmentation pipeline and attach one fresh random mask per image.
        lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
        lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            lowercase_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(UpperCAmelCase__ )

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            lowercase_ = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(UpperCAmelCase__ )

    # Initialize our trainer
    lowercase_ = Trainer(
        model=UpperCAmelCase__ ,
        args=UpperCAmelCase__ ,
        train_dataset=ds["""train"""] if training_args.do_train else None ,
        eval_dataset=ds["""validation"""] if training_args.do_eval else None ,
        tokenizer=UpperCAmelCase__ ,
        data_collator=UpperCAmelCase__ ,
    )

    # Training
    if training_args.do_train:
        lowercase_ = None
        if training_args.resume_from_checkpoint is not None:
            lowercase_ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowercase_ = last_checkpoint
        lowercase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        lowercase_ = trainer.evaluate()
        trainer.log_metrics("""eval""" , UpperCAmelCase__ )
        trainer.save_metrics("""eval""" , UpperCAmelCase__ )

    # Write model card and (optionally) push to hub
    lowercase_ = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCAmelCase__ )
    else:
        trainer.create_model_card(**UpperCAmelCase__ )


if __name__ == "__main__":
    main()
650
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


a = logging.get_logger(__name__)


class UpperCamelCase__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin-Transformer backbone.

    Stores the Swin hyper-parameters and exposes the backbone interface
    (``out_features`` / ``out_indices``) via ``BackboneConfigMixin``.
    """

    model_type = "maskformer-swin"

    # Map the generic config attribute names onto the Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # NOTE(review): mutable defaults kept for config compatibility; they are only read
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config.

        Args:
            image_size: Input resolution fed to the patch embedding.
            patch_size: Side length of each image patch.
            num_channels: Number of input image channels.
            embed_dim: Channel dimension after the patch embedding; doubles per stage.
            depths: Number of Swin blocks in each stage.
            num_heads: Attention heads per stage.
            window_size: Local-attention window size.
            mlp_ratio: MLP hidden dim as a multiple of the embedding dim.
            qkv_bias: Whether Q/K/V projections carry a bias term.
            out_features / out_indices: Which backbone stages to expose.
        """
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # hidden_size makes Swin usable with VisionEncoderDecoderModel: it is the
        # channel dimension after the last stage (embed_dim doubles at every stage).
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
650
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


a = logging.get_logger(__name__)


class UpperCamelCase__(BaseImageProcessor):
    """Image processor applying (optionally) shortest-edge resize, center crop,
    rescale and ImageNet-style normalization, returning a ``pixel_values`` batch.

    The resize step scales the requested shortest edge by 256/224 before
    resizing, then the center crop brings the image to the final size.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize *image*. ``size`` has either ``height``/``width`` keys or a
        ``shortest_edge`` key; the shortest edge is scaled by 256/224 first."""
        size_dict = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size_dict:
            shortest_edge = int((256 / 224) * size_dict["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop *image* to ``size`` (must contain ``height`` and ``width``)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize *image* with per-channel *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one image or a list of images and return a
        :class:`BatchFeature` with ``pixel_values``. Per-call arguments override
        the defaults captured at construction time."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
650
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy import structure: names listed here must match the TYPE_CHECKING imports
# below so that `_LazyModule` resolves attributes to the right submodules.
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
650
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Lazy import structure: names listed here must match the TYPE_CHECKING imports
# below so that `_LazyModule` resolves attributes to the right submodules.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
650
1
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length=8):
    """Return a random password of *length* characters drawn from letters,
    digits and punctuation, using a cryptographically secure RNG."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl, i):
    """Return a password of total length *i* guaranteed to contain every
    character of *chars_incl*, padded with roughly equal numbers of letters,
    digits and punctuation, then shuffled."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # Letters absorb the remainder so the total length comes out to exactly i.
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised helper for letters, characters and numbers
def random(chars_incl, i):
    """Return *i* characters chosen uniformly (securely) from *chars_incl*."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    """Return *i* random digits; *chars_incl* is accepted for interface parity."""
    return random(digits, i)


def random_letters(chars_incl, i):
    """Return *i* random ASCII letters; *chars_incl* is accepted for interface parity."""
    return random(ascii_letters, i)


def random_characters(chars_incl, i):
    """Return *i* random punctuation characters; *chars_incl* is accepted for interface parity."""
    return random(punctuation, i)
def is_strong_password(password, min_length=8):
    """Return True if *password* is at least *min_length* characters long and
    contains an uppercase letter, a lowercase letter, a digit and a
    punctuation character."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase, numbers, and special characters


def main():
    """Interactive entry point: prompt for a length and mandatory characters,
    then print both generator variants."""
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
650
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments onto the HF module path they load into.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk *key* (dot path) inside *hf_pointer* and copy *value* into the
    attribute selected by *weight_type* (or the leaf itself)."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy every fairseq encoder weight into *hf_model*; returns the
    encoder->decoder projection weight found under the ``proj`` prefix."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq feature-extractor conv/layer-norm parameter into the
    HF *feature_extractor*, asserting the shapes match."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    """Build a bias-free Linear sharing the weights of embedding *emb*."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Read a fairseq dict file and return an HF vocab mapping with the four
    special tokens at ids 0-3 and dict entries starting at id 4."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Convert a fairseq wav2vec2 + Speech2Text2 checkpoint into an HF
    SpeechEncoderDecoderModel saved under *pytorch_dump_folder_path*."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
650
1
def UpperCAmelCase_(input_str):
    """Return *input_str* with its words in reverse order.

    Whitespace runs are collapsed to single spaces by ``str.split``.

    >>> UpperCAmelCase_("hello world")
    'world hello'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class UpperCamelCase__(PretrainedConfig):
    """Configuration for ESM models, including the optional ESMFold head."""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize, expanding the nested EsmFoldConfig if present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    # Sub-config for the folding head; `trunk` nests a TrunkConfig.
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept either a TrunkConfig instance or its dict form.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize, expanding the nested TrunkConfig."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Accept either a StructureModuleConfig instance or its dict form.
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): the next two checks compare a value against itself (x % x is
        # always 0, so they never fire) — likely intended to use *_head_width.
        # Preserved as-is to avoid changing validation behavior.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize, expanding the nested StructureModuleConfig."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary, in id order."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
650
1
import numpy as np

import datasets


# Module docstrings restored: the decorator below references _DESCRIPTION and
# _KWARGS_DESCRIPTION, but the obfuscated source bound all three strings to the
# same name `a`, so those references were NameErrors.
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'

_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'

_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase__(datasets.Metric):
    """Mahalanobis-distance metric: distance of each point in X from a reference distribution."""

    def _info(self):
        # Method renamed from the obfuscated `UpperCAmelCase__`: datasets.Metric
        # dispatches to `_info`/`_compute`, so the obfuscated names broke the contract.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """X""": datasets.Sequence(datasets.Value("""float""", id="""sequence"""), id="""X"""),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # BUG FIX: the obfuscated source declared both parameters with the identical
        # name `UpperCamelCase__` (a SyntaxError) and rebound every local to
        # `lowercase_`; distinct names are restored here.
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("""Expected `X` to be a 2D vector""")
        if len(reference_distribution.shape) != 2:
            raise ValueError("""Expected `reference_distribution` to be a 2D vector""")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"""
            )

        # Get mahalanobis distance for each prediction: center X on the reference
        # mean, then form (x - mu) S^-1 (x - mu)^T using the reference covariance.
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the Moore-Penrose pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
650
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available

# BUG FIX: the obfuscated source named all three functions `UpperCAmelCase_`, so the
# later definitions shadowed the earlier ones, while `main()` called
# `env_command_parser`/`env_command` (undefined -> NameError) and `set_defaults(func=...)`
# pointed at the parser's own argument. The real names are restored from those call sites.


def env_command_parser(subparsers=None):
    """Build the argparse parser for `accelerate env` (standalone or as a subcommand)."""
    if subparsers is not None:
        parser = subparsers.add_parser("""env""")
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""")

    parser.add_argument(
        """--config_file""", default=None, help="""The config file to use for the default values in the launching script."""
    )

    if subparsers is not None:
        # When installed as a subcommand, dispatch to env_command on invocation.
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    """Collect environment info (versions, platform, accelerators, RAM, config) and print it.

    Returns the info dict so callers/tests can inspect it.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
        """PyTorch XPU available""": str(pt_xpu_available),
        """PyTorch NPU available""": str(pt_npu_available),
        """System RAM""": F'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        # NOTE(review): the obfuscated source discarded this value; storing it under
        # "GPU type" matches the surrounding report structure — confirm against upstream.
        info["""GPU type"""] = torch.cuda.get_device_name()

    print("""\nCopy-and-paste the text below in your GitHub issue\n""")
    print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()]))

    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""")
    accelerate_config_str = (
        """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str)

    info["""`Accelerate` configs"""] = accelerate_config

    return info


def main():
    """CLI entry point: parse args and run the env report."""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
650
1
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

# BUG FIX: the obfuscated source bound the TypeVar to `a`, leaving `T` undefined in
# `Generic[T]`. The TypeVar is restored under its proper name; `a` is kept as an
# alias for backward compatibility with any external references.
T = TypeVar("T")
a = T


class UpperCamelCase__(Generic[T]):
    """Graph stored as an adjacency list: a dict mapping each vertex to its neighbors.

    `directed=True` (default) records edges one-way; `directed=False` mirrors them.
    """

    def __init__(self, directed: bool = True) -> None:
        # BUG FIX: the parameter was obfuscated to `UpperCamelCase__` while the body
        # read `directed` (NameError); the name the body uses is restored.
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def UpperCAmelCase__(self, source_vertex: T, destination_vertex: T) -> UpperCamelCase__[T]:
        """Add an edge from `source_vertex` to `destination_vertex` and return self.

        BUG FIX: the obfuscated source declared both parameters with the identical
        name (a SyntaxError) and collapsed every assignment target to `lowercase_`,
        so new adjacency entries were never created; distinct names are restored.
        """
        if not self.directed:  # For undirected graphs
            # if both vertices already exist, just link them both ways
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # only the source exists: link it, then create the destination entry
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # only the destination exists: link it, then create the source entry
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # neither exists: create both entries, each pointing at the other
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both vertices exist, record the one-way edge
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # only the source exists: record the edge and create an empty destination
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # only the destination exists: create the source entry pointing at it
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # neither exists: create the source entry and an empty destination entry
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
650
# NOTE(review): this span is the DeiT model test module, machine-collapsed onto long
# physical lines and obfuscated (e.g. methods declare several parameters with the
# identical name `UpperCamelCase__`, which is not valid Python). Its logic is tightly
# coupled to transformers test mixins and statement order, so the code is left
# byte-for-byte unchanged; only descriptive comment lines are added.
# Imports and the start of DeiTModelTester.__init__ (stores test hyperparameters).
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCamelCase__ : def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ): '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = image_size lowercase_ = patch_size lowercase_ = num_channels lowercase_ = is_training lowercase_ = use_labels lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads
# Remainder of __init__ (seq_length = num_patches + 2 for [CLS] + distillation tokens),
# prepare_config_and_inputs, get_config, create_and_check_model, and the start of the
# masked-image-modeling check.
lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = scope lowercase_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowercase_ = (image_size // patch_size) ** 2 lowercase_ = num_patches + 2 def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ): '''simple docstring''' lowercase_ = DeiTModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ )
# Masked-image-modeling and image-classification checks, each also exercised with
# single-channel (greyscale) inputs, plus prepare_config_and_inputs_for_common and the
# start of the ModelTesterMixin-based test-class declaration.
model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase_ = 1 lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ): '''simple docstring''' lowercase_ = self.type_sequence_label_size lowercase_ = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase_ = 1 lowercase_ = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = ( {
# Test-class config (pipeline mapping, flags), setUp, config tests, embeddings checks,
# forward-signature check, and dispatchers into the model tester's checks.
'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : List[Any] = False def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = DeiTModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCamelCase__ ) lowercase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ = [*signature.parameters.keys()] lowercase_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs()
# Training tests: skips inference-only DeiTForImageClassificationWithTeacher, runs a
# backward pass, and the start of the gradient-checkpointing training test.
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ): '''simple docstring''' lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' if not self.model_tester.is_training: return lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCamelCase__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowercase_ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowercase_ = model(**UpperCamelCase__ ).loss loss.backward() def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowercase_ = False lowercase_ = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowercase_ = model_class(UpperCamelCase__ )
# Gradient-checkpointing backward pass and the problem-type (regression /
# single- / multi-label classification) loss test.
model.gradient_checkpointing_enable() model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowercase_ = model(**UpperCamelCase__ ).loss loss.backward() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCamelCase__ ), *get_values(UpperCamelCase__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ): lowercase_ = problem_type["""title"""] lowercase_ = problem_type["""num_labels"""] lowercase_ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if problem_type["num_labels"] > 1: lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem.
# Warning interception for the regression case, slow from_pretrained smoke test,
# prepare_img helper, and the start of the integration-test class.
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list: lowercase_ = model(**UpperCamelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def UpperCAmelCase__ ( self : int ): '''simple docstring''' for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def UpperCAmelCase_ ( ): lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( UpperCamelCase__ ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowercase_ = model(**UpperCamelCase__ ) # verify the logits lowercase_ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" ,
# fp16 inference smoke test (GPU-only): loads the model in float16 and runs one forward pass.
torch_dtype=torch.floataa , device_map="""auto""" ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ) lowercase_ = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase_ = model(UpperCamelCase__ )
650
1
def infix_2_postfix(infix):
    """Convert an infix expression (iterable of single-character symbols) to postfix.

    Prints a step-by-step conversion table as a side effect and returns the postfix
    string. Supported operators: ^ * / % + - and parentheses; operands are single
    alphanumeric characters.

    BUG FIXES vs. the obfuscated original:
    - both functions in this module were named `UpperCAmelCase_` (the second shadowed
      the first) while the internal call sites referenced `infix_2_postfix` /
      `infix_2_prefix`, which did not exist (NameError); the real names are restored.
    - the operator branch compared `priority[stack[-1]]` without guarding against a
      "(" on the stack ("(" has no priority entry), so inputs like "(a+b)*c" raised
      KeyError; the pop loop now stops at "(".
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()  # discard the "("
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # pop while the top is an operator of >= priority; never pop past "("
                while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    """Convert an infix expression to prefix notation.

    Reverses the expression, swaps "(" and ")", converts to postfix, and reverses
    the result. Shares infix_2_postfix's table-printing side effect.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


# Backward-compatible alias: the obfuscated module exposed the (last-defined)
# function under this name.
UpperCAmelCase_ = infix_2_prefix

if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
650
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
650
1
# NOTE(review): this span is an EfficientFormer checkpoint-conversion script,
# machine-collapsed onto long physical lines and obfuscated (e.g.
# `lowercase_ , lowercase_ , lowercase_ = old_name.split(".")` rebinds one name three
# times, and helper definitions share the name `UpperCAmelCase_`, so later defs shadow
# earlier ones). The rename logic is order-sensitive regex work and the rest depends on
# torch/torchvision/PIL checkpoints, so the code is left byte-for-byte unchanged; only
# comment lines are added.
# Imports and the start of the key-renaming helper mapping original checkpoint
# parameter names onto HuggingFace EfficientFormer module names.
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = old_name if "patch_embed" in old_name: lowercase_ , lowercase_ , lowercase_ = old_name.split(""".""" ) if layer == "0": lowercase_ = old_name.replace("""0""" , """convolution1""" ) elif layer == "1": lowercase_ = old_name.replace("""1""" , """batchnorm_before""" ) elif layer == "3": lowercase_ = old_name.replace("""3""" , """convolution2""" ) else: lowercase_ = old_name.replace("""4""" , """batchnorm_after""" ) if "network" in old_name and re.search(r"""\d\.\d""" , UpperCAmelCase__ ): lowercase_ = r"""\b\d{2}\b""" if bool(re.search(UpperCAmelCase__ , UpperCAmelCase__ ) ): lowercase_ = re.search(r"""\d\.\d\d.""" , UpperCAmelCase__ ).group() else: lowercase_ = re.search(r"""\d\.\d.""" , UpperCAmelCase__ ).group() if int(match[0] ) < 6: lowercase_ = old_name.replace(UpperCAmelCase__ , """""" ) lowercase_ = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] ) lowercase_ = """intermediate_stages.""" + trimmed_name else: lowercase_ = old_name.replace(UpperCAmelCase__ , """""" ) if int(match[2] ) < num_meta4D_last_stage: lowercase_ = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] ) else: lowercase_ = str(int(match[2] ) - num_meta4D_last_stage ) lowercase_ = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index ) if "norm1" in old_name: lowercase_ = trimmed_name.replace("""norm1""" , """layernorm1""" ) elif "norm2" in old_name: lowercase_ = trimmed_name.replace("""norm2""" , """layernorm2""" )
# Remainder of the rename helper (fc/proj/head/patch_embed prefixes), the
# state-dict-wide rename pass, and the COCO sample-image fetch helper.
elif "fc1" in old_name: lowercase_ = trimmed_name.replace("""fc1""" , """linear_in""" ) elif "fc2" in old_name: lowercase_ = trimmed_name.replace("""fc2""" , """linear_out""" ) lowercase_ = """last_stage.""" + trimmed_name elif "network" in old_name and re.search(r""".\d.""" , UpperCAmelCase__ ): lowercase_ = old_name.replace("""network""" , """intermediate_stages""" ) if "fc" in new_name: lowercase_ = new_name.replace("""fc""" , """convolution""" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): lowercase_ = new_name.replace("""norm1""" , """batchnorm_before""" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): lowercase_ = new_name.replace("""norm2""" , """batchnorm_after""" ) if "proj" in new_name: lowercase_ = new_name.replace("""proj""" , """projection""" ) if "dist_head" in new_name: lowercase_ = new_name.replace("""dist_head""" , """distillation_classifier""" ) elif "head" in new_name: lowercase_ = new_name.replace("""head""" , """classifier""" ) elif "patch_embed" in new_name: lowercase_ = """efficientformer.""" + new_name elif new_name == "norm.weight" or new_name == "norm.bias": lowercase_ = new_name.replace("""norm""" , """layernorm""" ) lowercase_ = """efficientformer.""" + new_name else: lowercase_ = """efficientformer.encoder.""" + new_name return new_name def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): for key in checkpoint.copy().keys(): lowercase_ = checkpoint.pop(UpperCAmelCase__ ) lowercase_ = val return checkpoint def UpperCAmelCase_ ( ): lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase_ = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ) return image def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = torch.load(UpperCAmelCase__ , map_location="""cpu""" )["""model"""] lowercase_ = EfficientFormerConfig.from_json_file(UpperCAmelCase__ ) lowercase_ =
# Main conversion: load the checkpoint, rename keys, run a sample image through both
# the new processor and the original torchvision transform pipeline, and compare
# logits against hard-coded expected values per model size (l1/l3/l7).
EfficientFormerForImageClassificationWithTeacher(UpperCAmelCase__ ) lowercase_ = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] ) lowercase_ = config.depths[-1] - config.num_metaad_blocks + 1 lowercase_ = convert_torch_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) model.eval() lowercase_ = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } # prepare image lowercase_ = prepare_img() lowercase_ = 2_5_6 lowercase_ = 2_2_4 lowercase_ = EfficientFormerImageProcessor( size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , ) lowercase_ = processor(images=UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values # original processing pipeline lowercase_ = Compose( [ Resize(UpperCAmelCase__ , interpolation=pillow_resamplings["""bicubic"""] ), CenterCrop(UpperCAmelCase__ ), ToTensor(), Normalize(UpperCAmelCase__ , UpperCAmelCase__ ), ] ) lowercase_ = image_transforms(UpperCAmelCase__ ).unsqueeze(0 ) assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ = model(UpperCAmelCase__ ) lowercase_ = outputs.logits lowercase_ = (1, 1_0_0_0) if "l1" in model_name: lowercase_ = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :1_0] , UpperCAmelCase__ , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: lowercase_ = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :1_0] , UpperCAmelCase__ , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: lowercase_ = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape ==
# Save/push: write the converted model and processor to disk and optionally push both
# to the Hub; the argparse block wires up --pytorch_model_path/--config_file/
# --pytorch_dump_path and the push_to_hub toggles.
expected_shape else: raise ValueError( F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' ) # Save Checkpoints Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) processor.save_pretrained(UpperCAmelCase__ ) print(F'''Processor successfuly saved at {pytorch_dump_path}''' ) if push_to_hub: print("""Pushing model to the hub...""" ) model.push_to_hub( repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add model""" , use_temp_dir=UpperCAmelCase__ , ) processor.push_to_hub( repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add image processor""" , use_temp_dir=UpperCAmelCase__ , ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to EfficientFormer pytorch checkpoint.', ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for EfficientFormer model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) parser.set_defaults(push_to_hub=True) a = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
650
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer __SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast __SCREAMING_SNAKE_CASE : List[Any] = True __SCREAMING_SNAKE_CASE : int = True def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = """<pad>""" lowercase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 1_008 ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) lowercase_ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + 
tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase__ , f.name ) lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ ) lowercase_ = pickle.dumps(UpperCamelCase__ ) pickle.loads(UpperCamelCase__ ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase_ = self.get_tokenizer() lowercase_ = self.get_rust_tokenizer() lowercase_ = """I was born in 92000, and this is falsé.""" lowercase_ = 
tokenizer.tokenize(UpperCamelCase__ ) lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = self.get_rust_tokenizer() lowercase_ = tokenizer.encode(UpperCamelCase__ ) lowercase_ = rust_tokenizer.encode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = """Hello World!""" lowercase_ = [2, 31_227, 4_447, 35] self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = { """input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
650
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class UpperCamelCase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) lowercase_ = get_activation("""gelu""" ) self.assertTrue(torch.allclose(gelu_python(UpperCamelCase__ ) , torch_builtin(UpperCamelCase__ ) ) ) self.assertFalse(torch.allclose(gelu_python(UpperCamelCase__ ) , gelu_new(UpperCamelCase__ ) ) ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) lowercase_ = get_activation("""gelu""" ) lowercase_ = get_activation("""gelu_10""" ) lowercase_ = torch_builtin(UpperCamelCase__ ) lowercase_ = geluaa(UpperCamelCase__ ) lowercase_ = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(UpperCamelCase__ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' get_activation("""gelu""" ) get_activation("""gelu_10""" ) get_activation("""gelu_fast""" ) get_activation("""gelu_new""" ) get_activation("""gelu_python""" ) get_activation("""gelu_pytorch_tanh""" ) get_activation("""linear""" ) get_activation("""mish""" ) get_activation("""quick_gelu""" ) get_activation("""relu""" ) get_activation("""sigmoid""" ) get_activation("""silu""" ) get_activation("""swish""" ) get_activation("""tanh""" ) with self.assertRaises(UpperCamelCase__ ): get_activation("""bogus""" ) with self.assertRaises(UpperCamelCase__ ): get_activation(UpperCamelCase__ ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ = get_activation("""gelu""" ) lowercase_ = 1 lowercase_ = get_activation("""gelu""" ) self.assertEqual(acta.a , 1 ) with 
self.assertRaises(UpperCamelCase__ ): lowercase_ = acta.a
650
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: a = None a = logging.get_logger(__name__) a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} a = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 a = { 't5-small': 5_1_2, 't5-base': 5_1_2, 't5-large': 5_1_2, 't5-3b': 5_1_2, 't5-11b': 5_1_2, } class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask'] __SCREAMING_SNAKE_CASE : Dict = TaTokenizer __SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : 
Optional[Any]=None , **UpperCamelCase__ : List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , ) lowercase_ = vocab_file lowercase_ = False if not self.vocab_file else True lowercase_ = extra_ids @staticmethod def UpperCAmelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when 
padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , ) return max_model_length def UpperCAmelCase__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ = os.path.join( UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) logger.info(F'''Copy vocab file to {out_vocab_file}''' ) return (out_vocab_file,) def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase_ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase_ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' lowercase_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return list( set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not 
None , self.additional_special_tokens ) ) ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
650
1
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging a = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase__ ( __magic_name__ ): def __init__( self : Optional[Any] , UpperCamelCase__ : WhisperForConditionalGeneration , UpperCamelCase__ : WhisperProcessor , UpperCamelCase__ : AutoencoderKL , UpperCamelCase__ : CLIPTextModel , UpperCamelCase__ : CLIPTokenizer , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase__ : StableDiffusionSafetyChecker , UpperCamelCase__ : CLIPImageProcessor , ): '''simple docstring''' super().__init__() if safety_checker is None: logger.warning( F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. 
For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( speech_model=UpperCamelCase__ , speech_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , ) def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": lowercase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCamelCase__ ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' self.enable_attention_slicing(UpperCamelCase__ ) @torch.no_grad() def __call__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int]=16_000 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 50 , UpperCamelCase__ : float = 7.5 , UpperCamelCase__ : Optional[Union[str, List[str]]] = None , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__ : int = 1 , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' lowercase_ = self.speech_processor.feature_extractor( UpperCamelCase__ , return_tensors="""pt""" , sampling_rate=UpperCamelCase__ ).input_features.to(self.device ) lowercase_ = self.speech_model.generate(UpperCamelCase__ , max_length=480_000 ) lowercase_ = self.speech_processor.tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , normalize=UpperCamelCase__ )[ 0 ] if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowercase_ = 1 elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowercase_ 
= len(UpperCamelCase__ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__ )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(UpperCamelCase__ )}.''' ) # get prompt text embeddings lowercase_ = self.tokenizer( UpperCamelCase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) lowercase_ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowercase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) lowercase_ = text_input_ids[:, : self.tokenizer.model_max_length] lowercase_ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method lowercase_ , lowercase_ , lowercase_ = text_embeddings.shape lowercase_ = text_embeddings.repeat(1 , UpperCamelCase__ , 1 ) lowercase_ = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase__ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
lowercase_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowercase_ = 42 if negative_prompt is None: lowercase_ = [""""""] * batch_size elif type(UpperCamelCase__ ) is not type(UpperCamelCase__ ): raise TypeError( F'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase__ )} !=''' F''' {type(UpperCamelCase__ )}.''' ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowercase_ = [negative_prompt] elif batch_size != len(UpperCamelCase__ ): raise ValueError( F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase__ )}, but `prompt`:''' F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: lowercase_ = negative_prompt lowercase_ = text_input_ids.shape[-1] lowercase_ = self.tokenizer( UpperCamelCase__ , padding="""max_length""" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" , ) lowercase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowercase_ = uncond_embeddings.shape[1] lowercase_ = uncond_embeddings.repeat(1 , UpperCamelCase__ , 1 ) lowercase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase_ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
lowercase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) lowercase_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps lowercase_ = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device="""cpu""" , dtype=UpperCamelCase__ ).to( self.device ) else: lowercase_ = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) lowercase_ = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(UpperCamelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand lowercase_ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowercase_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowercase_ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowercase_ = {} if accepts_eta: lowercase_ = eta for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ): # expand the latents if we are doing classifier free guidance lowercase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase_ = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ ) # predict the noise residual lowercase_ = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ).sample # perform guidance if do_classifier_free_guidance: lowercase_ , lowercase_ = noise_pred.chunk(2 ) lowercase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 lowercase_ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowercase_ = 1 / 0.18_215 * latents lowercase_ = self.vae.decode(UpperCamelCase__ ).sample lowercase_ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowercase_ = self.numpy_to_pil(UpperCamelCase__ ) if not return_dict: return image return StableDiffusionPipelineOutput(images=UpperCamelCase__ , nsfw_content_detected=UpperCamelCase__ )
650
# Unit tests for StableDiffusionDiffEditPipeline: a fast-CI class built on tiny
# dummy models, plus a @slow GPU class that exercises the real SD-2.1 checkpoint.
# NOTE(review): identifiers (`UpperCamelCase__`, `lowercase_`, `__magic_name__`)
# are machine-mangled; every `lowercase_ = ...` originally bound a distinct
# local (the names read back later — `unet`, `pipe`, `mask`, ... — show the
# pre-mangling intent). Code is preserved byte-for-byte; only comments added.
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


# Make torch/cuDNN deterministic so the numeric slice comparisons below are
# reproducible across runs.
enable_full_determinism()


class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Fast DiffEdit pipeline tests using tiny randomly-initialised components."""

    # Pipeline under test plus the parameter sets checked by the shared mixins.
    __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
    __SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    __SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    __SCREAMING_SNAKE_CASE : int = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __SCREAMING_SNAKE_CASE : Any = frozenset([] )

    def UpperCAmelCase__ ( self : Any ):
        """Build tiny dummy components (UNet, DDIM schedulers, VAE, CLIP) for fast tests."""
        torch.manual_seed(0 )
        lowercase_ = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,
            cross_attention_dim=32 ,
            attention_head_dim=(2, 4) ,
            use_linear_projection=UpperCamelCase__ ,
        )
        lowercase_ = DDIMScheduler(
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            beta_schedule="""scaled_linear""" ,
            clip_sample=UpperCamelCase__ ,
            set_alpha_to_one=UpperCamelCase__ ,
        )
        # Inverse scheduler is what DiffEdit uses for DDIM inversion of latents.
        lowercase_ = DDIMInverseScheduler(
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            beta_schedule="""scaled_linear""" ,
            clip_sample=UpperCamelCase__ ,
            set_alpha_to_zero=UpperCamelCase__ ,
        )
        torch.manual_seed(0 )
        lowercase_ = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,
            latent_channels=4 ,
            sample_size=128 ,
        )
        torch.manual_seed(0 )
        lowercase_ = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1e-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_000 ,
            hidden_act="""gelu""" ,
            projection_dim=512 ,
        )
        lowercase_ = CLIPTextModel(UpperCamelCase__ )
        lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        lowercase_ = {
            """unet""": unet,
            """scheduler""": scheduler,
            """inverse_scheduler""": inverse_scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
        """Inputs for the main pipeline call: a 16x16 mask and (1,2,4,16,16) latents."""
        lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        # mps has no per-device Generator; fall back to the global seed.
        if str(UpperCamelCase__ ).startswith("""mps""" ):
            lowercase_ = torch.manual_seed(UpperCamelCase__ )
        else:
            lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        lowercase_ = {
            """prompt""": """a dog and a newt""",
            """mask_image""": mask,
            """image_latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
        """Inputs for `generate_mask`: a random 32x32 RGB image plus source/target prompts."""
        lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
        if str(UpperCamelCase__ ).startswith("""mps""" ):
            lowercase_ = torch.manual_seed(UpperCamelCase__ )
        else:
            lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        lowercase_ = {
            """image""": image,
            """source_prompt""": """a cat and a frog""",
            """target_prompt""": """a dog and a newt""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """num_maps_per_mask""": 2,
            """mask_encode_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
        """Inputs for `invert`: same dummy image, decoding latents back to pixels."""
        lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
        if str(UpperCamelCase__ ).startswith("""mps""" ):
            lowercase_ = torch.manual_seed(UpperCamelCase__ )
        else:
            lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        lowercase_ = {
            """image""": image,
            """prompt""": """a cat and a frog""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """decode_latents""": True,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCAmelCase__ ( self : int ):
        """Outputs must be unchanged after saving/reloading with all optional components set to None."""
        if not hasattr(self.pipeline_class , """_optional_components""" ):
            return
        lowercase_ = self.get_dummy_components()
        lowercase_ = self.pipeline_class(**UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
        lowercase_ = pipe(**UpperCamelCase__ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(UpperCamelCase__ )
            lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
            pipe_loaded.to(UpperCamelCase__ )
            pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(UpperCamelCase__ , UpperCamelCase__ ) is None ,
                F'''`{optional_component}` did not stay set to None after loading.''' ,
            )
        lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
        lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
        lowercase_ = np.abs(output - output_loaded ).max()
        self.assertLess(UpperCamelCase__ , 1e-4 )

    def UpperCAmelCase__ ( self : Optional[int] ):
        """generate_mask on dummy inputs must yield an all-zero 16x16 mask."""
        lowercase_ = """cpu"""
        lowercase_ = self.get_dummy_components()
        lowercase_ = self.pipeline_class(**UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
        lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
        lowercase_ = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        lowercase_ = np.array([0] * 9 )
        lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(UpperCamelCase__ , 1e-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    def UpperCAmelCase__ ( self : Any ):
        """DDIM inversion must reproduce the golden pixel slice on CPU."""
        lowercase_ = """cpu"""
        lowercase_ = self.get_dummy_components()
        lowercase_ = self.pipeline_class(**UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
        lowercase_ = pipe.invert(**UpperCamelCase__ ).images
        lowercase_ = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowercase_ = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(UpperCamelCase__ , 1e-3 )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Delegate to the shared batch-vs-single consistency check with a looser tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=5e-3 )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Same inversion check but with the DPMSolver multistep scheduler pair."""
        lowercase_ = """cpu"""
        lowercase_ = self.get_dummy_components()
        lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
        lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
        lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
        lowercase_ = self.pipeline_class(**UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
        lowercase_ = pipe.invert(**UpperCamelCase__ ).images
        lowercase_ = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowercase_ = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(UpperCamelCase__ , 1e-3 )


@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
    """End-to-end DiffEdit tests against the real stabilityai/stable-diffusion-2-1 checkpoint."""

    def UpperCAmelCase__ ( self : Tuple ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def UpperCAmelCase__ ( cls : Dict ):
        """Download and cache the 768x768 fruit fixture image once per class."""
        lowercase_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
        lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
        lowercase_ = raw_image

    def UpperCAmelCase__ ( self : Dict ):
        """Full mask -> invert -> inpaint round-trip with DDIM schedulers."""
        lowercase_ = torch.manual_seed(0 )
        lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
        lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
        lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowercase_ = """a bowl of fruit"""
        lowercase_ = """a bowl of pears"""
        lowercase_ = pipe.generate_mask(
            image=self.raw_image ,
            source_prompt=UpperCamelCase__ ,
            target_prompt=UpperCamelCase__ ,
            generator=UpperCamelCase__ ,
        )
        lowercase_ = pipe.invert(
            prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
        lowercase_ = pipe(
            prompt=UpperCamelCase__ ,
            mask_image=UpperCamelCase__ ,
            image_latents=UpperCamelCase__ ,
            generator=UpperCamelCase__ ,
            negative_prompt=UpperCamelCase__ ,
            inpaint_strength=0.7 ,
            output_type="""numpy""" ,
        ).images[0]
        # Loose tolerance (0.5 in [0,1] pixel space): only checks gross agreement
        # with the reference "pears" render.
        lowercase_ = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1

    def UpperCAmelCase__ ( self : Any ):
        """Same round-trip using the DPMSolver multistep scheduler pair with 25 steps."""
        lowercase_ = torch.manual_seed(0 )
        lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
        lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowercase_ = """a bowl of fruit"""
        lowercase_ = """a bowl of pears"""
        lowercase_ = pipe.generate_mask(
            image=self.raw_image ,
            source_prompt=UpperCamelCase__ ,
            target_prompt=UpperCamelCase__ ,
            generator=UpperCamelCase__ ,
        )
        lowercase_ = pipe.invert(
            prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
        lowercase_ = pipe(
            prompt=UpperCamelCase__ ,
            mask_image=UpperCamelCase__ ,
            image_latents=UpperCamelCase__ ,
            generator=UpperCamelCase__ ,
            negative_prompt=UpperCamelCase__ ,
            inpaint_strength=0.7 ,
            num_inference_steps=25 ,
            output_type="""numpy""" ,
        ).images[0]
        lowercase_ = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
650
1
from __future__ import annotations

import time

import numpy as np

# Demo fixtures for the Banker's algorithm below.
# NOTE(review): identifier mangling collapsed three distinct module constants
# (claim vector, allocation table, maximum-claim table) onto the single name
# `a`, so only the last assignment survives at runtime as written.
a = [8, 5, 9, 7]
a = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
a = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class UpperCamelCase__ :
    """Banker's-algorithm deadlock-avoidance simulator.

    Decides whether all processes can run to completion given a claim vector,
    a per-process allocation table and a maximum-claim table.

    NOTE(review): locals/attributes were machine-mangled (every binding is
    `lowercase_`), so the private attributes read below are never assigned as
    written; docstrings describe the evident pre-mangling intent.
    """

    def __init__( self : Optional[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[list[int]] , ):
        """Store the claim vector, allocation table and maximum-claim table."""
        lowercase_ = claim_vector
        lowercase_ = allocated_resources_table
        lowercase_ = maximum_claim_table

    def UpperCAmelCase__ ( self : List[Any] ):
        """Column-wise sums of the allocation table: resources currently in use."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def UpperCAmelCase__ ( self : Any ):
        """Available resources = claim vector minus currently-allocated sums."""
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def UpperCAmelCase__ ( self : Tuple ):
        """Per-process need = maximum claim minus current allocation (row-wise)."""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCamelCase__ ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def UpperCAmelCase__ ( self : Optional[int] ):
        """Map each need row's index back to the row itself.

        NOTE(review): `UpperCamelCase__` is undefined in this scope as written —
        pre-mangling this was presumably ``self.__need().index(i)``; also note
        duplicate need rows would collapse to one key. Verify before relying on
        this method.
        """
        return {self.__need().index(UpperCamelCase__ ): i for i in self.__need()}

    def UpperCAmelCase__ ( self : Optional[Any] , **UpperCamelCase__ : Union[str, Any] ):
        """Run the Banker's algorithm main loop.

        Prints each scheduling step; declares a safe state when every process
        could execute, otherwise aborts with an unsafe-state message.
        """
        lowercase_ = self.__need()
        lowercase_ = self.__allocated_resources_table
        lowercase_ = self.__available_resources()
        lowercase_ = self.__need_index_manager()
        # Any truthy keyword flag triggers the pretty-printed tables first.
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("""_""" * 50 + """\n""" )
        while need_list:
            lowercase_ = False
            for each_need in need_list:
                # A process can execute only if every resource it still needs
                # is currently available.
                lowercase_ = True
                for index, need in enumerate(UpperCamelCase__ ):
                    if need > available_resources[index]:
                        lowercase_ = False
                        break
                if execution:
                    lowercase_ = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            lowercase_ = original_need_index
                    print(F'''Process {process_number + 1} is executing.''' )
                    # remove the process run from stack
                    need_list.remove(UpperCamelCase__ )
                    # update available/freed resources stack
                    lowercase_ = np.array(UpperCamelCase__ ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        """Updated available resource stack for processes: """
                        + """ """.join([str(UpperCamelCase__ ) for x in available_resources] ) )
                    break
            if safe:
                print("""The process is in a safe state.\n""" )
            else:
                print("""System in unsafe state. Aborting...\n""" )
                break

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Pretty-print the allocation and maximum-claim tables plus resource vectors."""
        print(""" """ * 9 + """Allocated Resource Table""" )
        for item in self.__allocated_resources_table:
            print(
                F'''P{self.__allocated_resources_table.index(UpperCamelCase__ ) + 1}'''
                + """ """.join(F'''{it:>8}''' for it in item )
                + """\n""" )
        print(""" """ * 9 + """System Resource Table""" )
        for item in self.__maximum_claim_table:
            print(
                F'''P{self.__maximum_claim_table.index(UpperCamelCase__ ) + 1}'''
                + """ """.join(F'''{it:>8}''' for it in item )
                + """\n""" )
        print(
            """Current Usage by Active Processes: """
            + """ """.join(str(UpperCamelCase__ ) for x in self.__claim_vector ) )
        print(
            """Initial Available Resources: """
            + """ """.join(str(UpperCamelCase__ ) for x in self.__available_resources() ) )
        time.sleep(1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


a = logging.get_logger(__name__)


class UpperCamelCase__ ( __magic_name__ ):
    """Image processor that rescales pixel values and symmetrically pads images
    so height and width become multiples of ``pad_size`` (default 8).

    NOTE(review): identifiers are machine-mangled — every `lowercase_ = ...`
    originally bound a distinct local (see the names read back later:
    `do_rescale`, `images`, ...). Code preserved byte-for-byte.
    """

    __SCREAMING_SNAKE_CASE : str = ['pixel_values']

    def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
        """Store rescale/pad configuration defaults (rescale by 1/255, pad to multiples of 8)."""
        super().__init__(**UpperCamelCase__ )
        lowercase_ = do_rescale
        lowercase_ = rescale_factor
        lowercase_ = do_pad
        lowercase_ = pad_size

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
        """Multiply the image by `scale`; thin wrapper over image_transforms.rescale."""
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
        """Symmetrically pad bottom/right so both dimensions become multiples of `size`.

        NOTE(review): ``(old // size + 1) * size - old`` adds a full extra
        `size` of padding when the dimension is already a multiple — confirm
        this is the intended behaviour before changing it.
        """
        lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
        lowercase_ = (old_height // size + 1) * size - old_height
        lowercase_ = (old_width // size + 1) * size - old_width
        return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
        """Validate inputs, optionally rescale and pad every image, and return
        a BatchFeature of pixel values in the requested channel layout.

        Raises ValueError for unsupported image types or a missing rescale factor.
        """
        # Per-call arguments override the instance-level defaults.
        lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
        lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase_ = do_pad if do_pad is not None else self.do_pad
        lowercase_ = pad_size if pad_size is not None else self.pad_size
        lowercase_ = make_list_of_images(UpperCamelCase__ )
        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
        if do_rescale:
            lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
        if do_pad:
            lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
        lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
        lowercase_ = {"""pixel_values""": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
650
1
# Unit tests for the ByT5 tokenizer (byte-level, vocab-free).
# NOTE(review): identifiers are machine-mangled (`UpperCamelCase__`,
# `lowercase_`, `__magic_name__`); each `lowercase_ = ...` originally bound a
# distinct local whose real name appears where it is read back. Code preserved
# byte-for-byte; only comments/docstrings added.
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


# Pick the tensor framework for return_tensors= based on what is installed.
if is_torch_available():
    a = 'pt'
elif is_tf_available():
    a = 'tf'
else:
    a = 'jax'


class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
    """ByT5 tokenizer test suite built on the shared TokenizerTesterMixin."""

    __SCREAMING_SNAKE_CASE : Optional[int] = ByTaTokenizer
    __SCREAMING_SNAKE_CASE : List[str] = False

    def UpperCAmelCase__ ( self : Dict ):
        """Create a fresh ByT5 tokenizer and save it into the shared tmp dir."""
        super().setUp()
        lowercase_ = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def UpperCAmelCase__ ( self : Any ):
        """Reference google/byt5-small tokenizer, downloaded once per test run."""
        return ByTaTokenizer.from_pretrained("""google/byt5-small""" )

    def UpperCAmelCase__ ( self : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ):
        """Reload the tokenizer saved in setUp, forwarding extra kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=20 , UpperCamelCase__ : List[str]=5 ):
        """Build a (text, ids) pair of cleanly-decodable ASCII tokens that
        round-trip through encode/decode, bounded by min/max length."""
        lowercase_ = []
        for i in range(len(UpperCamelCase__ ) ):
            try:
                lowercase_ = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
            except UnicodeDecodeError:
                # Some single bytes are not valid UTF-8 on their own; skip them.
                pass
            toks.append((i, tok) )
        # Keep only ASCII-letter/space tokens that encode back to exactly one id.
        lowercase_ = list(filter(lambda UpperCamelCase__ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase__ ) )
        lowercase_ = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
        if max_length is not None and len(UpperCamelCase__ ) > max_length:
            lowercase_ = toks[:max_length]
        if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
            while len(UpperCamelCase__ ) < min_length:
                lowercase_ = toks + toks
        # toks_str = [t[1] for t in toks]
        lowercase_ = [t[0] for t in toks]
        # Ensure consistency
        lowercase_ = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
        if " " not in output_txt and len(UpperCamelCase__ ) > 1:
            lowercase_ = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
                + """ """
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
            )
        if with_prefix_space:
            lowercase_ = """ """ + output_txt
        lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        return output_txt, output_ids

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """An explicit trailing </s> must encode the same as no trailing eos."""
        lowercase_ = self.ta_base_tokenizer
        lowercase_ = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
        lowercase_ = tokenizer(["""hi""", """I went to the gym""", """"""] )
        self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )

    def UpperCAmelCase__ ( self : Dict ):
        """Multibyte UTF-8 characters must round-trip through encode/decode."""
        lowercase_ = self.ta_base_tokenizer
        lowercase_ = """Unicode €."""
        lowercase_ = tokenizer(UpperCamelCase__ )
        lowercase_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
        # decoding
        lowercase_ = tokenizer.decode(UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , """Unicode €.</s>""" )
        lowercase_ = tokenizer("""e è é ê ë""" )
        lowercase_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
        # decoding
        lowercase_ = tokenizer.decode(UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , """e è é ê ë</s>""" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Padded batch encoding returns the expected ids and (2, 37) shapes."""
        lowercase_ = self.ta_base_tokenizer
        lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        # fmt: off
        lowercase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        lowercase_ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        if FRAMEWORK != "jax":
            lowercase_ = list(batch.input_ids.numpy()[0] )
        else:
            lowercase_ = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )

    def UpperCAmelCase__ ( self : Dict ):
        """Encoding without targets must not produce decoder_* keys."""
        lowercase_ = self.ta_base_tokenizer
        lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        lowercase_ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("""input_ids""" , UpperCamelCase__ )
        self.assertIn("""attention_mask""" , UpperCamelCase__ )
        self.assertNotIn("""decoder_input_ids""" , UpperCamelCase__ )
        self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """text_target with max_length padding yields 32-wide target ids."""
        lowercase_ = self.ta_base_tokenizer
        lowercase_ = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        lowercase_ = tokenizer(
            text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    def UpperCAmelCase__ ( self : Dict ):
        """Explicit </s> in source and target text maps to the expected ids/labels."""
        lowercase_ = self.ta_base_tokenizer
        lowercase_ = ["""A long paragraph for summarization. </s>"""]
        lowercase_ = ["""Summary of the text. </s>"""]
        # fmt: off
        lowercase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        lowercase_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        lowercase_ = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , batch["""input_ids"""][0] )
        self.assertEqual(UpperCamelCase__ , batch["""labels"""][0] )

    def UpperCAmelCase__ ( self : Dict ):
        """Save/reload round-trips must preserve encodings, added tokens and model_max_length."""
        lowercase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        lowercase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowercase_ = tempfile.mkdtemp()
                lowercase_ = """ He is very happy, UNwant\u00E9d,running"""
                lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                tokenizer.save_pretrained(UpperCamelCase__ )
                lowercase_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
                lowercase_ = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
                shutil.rmtree(UpperCamelCase__ )
        lowercase_ = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowercase_ = tempfile.mkdtemp()
                lowercase_ = """ He is very happy, UNwant\u00E9d,running"""
                tokenizer.add_tokens(["""bim""", """bambam"""] )
                lowercase_ = tokenizer.additional_special_tokens
                additional_special_tokens.append("""new_additional_special_token""" )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                tokenizer.save_pretrained(UpperCamelCase__ )
                lowercase_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
                lowercase_ = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
                self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                lowercase_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Tuple ):
        """Editing the saved special-token JSON files must be honoured on reload,
        and additional_special_tokens can be overridden in from_pretrained."""
        lowercase_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase__ )
                with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
                    lowercase_ = json.load(UpperCamelCase__ )
                with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
                    lowercase_ = json.load(UpperCamelCase__ )
                lowercase_ = [F'''<extra_id_{i}>''' for i in range(125 )]
                lowercase_ = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                lowercase_ = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(UpperCamelCase__ , UpperCamelCase__ )
                with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(UpperCamelCase__ , UpperCamelCase__ )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowercase_ = tokenizer_class.from_pretrained(
                    UpperCamelCase__ , )
                self.assertIn(
                    """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["""an_additional_special_token"""] ,
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowercase_ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase__ )]
                lowercase_ = tokenizer_class.from_pretrained(
                    UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
                self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["""a_new_additional_special_token"""] ,
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )

    def UpperCAmelCase__ ( self : str ):
        """Decoding id 255 alone must yield the empty string after a save/reload."""
        lowercase_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase__ )
                lowercase_ = tokenizer_class.from_pretrained(UpperCamelCase__ )
                self.assertTrue(tokenizer.decode([255] ) == """""" )

    def UpperCAmelCase__ ( self : int ):
        """Intentionally skipped: not applicable to a vocab-free byte tokenizer."""
        pass

    def UpperCAmelCase__ ( self : Any ):
        """Intentionally skipped: not applicable to a vocab-free byte tokenizer."""
        pass

    def UpperCAmelCase__ ( self : Optional[int] ):
        """Intentionally skipped: not applicable to a vocab-free byte tokenizer."""
        pass

    def UpperCAmelCase__ ( self : int ):
        """Intentionally skipped: not applicable to a vocab-free byte tokenizer."""
        pass

    def UpperCAmelCase__ ( self : Optional[int] ):
        """convert_tokens_to_string must return a plain string for byte tokens."""
        lowercase_ = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                lowercase_ = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
                lowercase_ = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
                self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Special-token id/value setters must stay mutually consistent."""
        lowercase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                lowercase_ = [
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]
                lowercase_ = 0
                lowercase_ = tokenizer.convert_ids_to_tokens(
                    UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
                for attr in attributes_list:
                    setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
                    self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
                    self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
                    setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
                    self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
                    self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
                setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [] )
                self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [] )
                self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [] )
                setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [token_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
650
def UpperCAmelCase_(number):
    """Return the 1-based position of the highest set bit of ``number``.

    Equivalent to counting how many right-shifts it takes to reach zero;
    returns 0 for an input of 0 (no set bits).

    Raises:
        TypeError: if ``number`` is not an ``int``.
        ValueError: if ``number`` is negative — the original shift loop
            never terminated for negative values (``-1 >> 1 == -1``).
    """
    # Fixes the original's isinstance(number, number): the second argument
    # must be the *type*, so every call raised TypeError regardless of input.
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a non-negative 'int'")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
1
def UpperCAmelCase_(number):
    """Return the position (1-based from the least significant bit) of the
    highest set bit of ``number``; 0 when ``number`` is 0.

    Raises:
        TypeError: if ``number`` is not an ``int``.
        ValueError: if ``number`` is negative (the original right-shift loop
            could never terminate for negative inputs).
    """
    # The original called isinstance(number, number) — the second argument
    # must be a type — and referenced names that were not the parameter.
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a non-negative 'int'")
    # int.bit_length() is exactly "number of shifts until zero".
    return number.bit_length()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


# NOTE(review): identifiers in this file look mechanically rewritten — every
# parameter is `UpperCamelCase__`, every assignment target is `lowercase_`,
# and the base classes are the placeholder `__magic_name__`.  The duplicated
# parameter names are not valid Python; the original parameter-to-argument
# mapping cannot be recovered from this text alone, so the comments below only
# describe what the visible calls do.
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ):
    # A T5-style encoder stack: two nn.Embedding tables, pre-dropout, a list
    # of transformers TaBlock layers sharing one TaConfig, then TaLayerNorm
    # and post-dropout.

    @register_to_config
    def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ):
        """Build embeddings, a shared TaConfig, and the TaBlock encoder stack.

        NOTE(review): all parameters share one rewritten name (a SyntaxError
        as written); the originals are lost in this copy.
        """
        super().__init__()
        # Two embedding tables; the forward pass reads them back as
        # `self.token_embedder` and `self.position_encoding` — presumably the
        # first is token embeddings and the second position embeddings.
        lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
        lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
        # A boolean flag; its target attribute name was lost in the rewrite
        # (possibly `position_encoding.weight.requires_grad` — TODO confirm).
        lowercase_ = False
        lowercase_ = nn.Dropout(p=UpperCamelCase__ )
        # One T5 configuration shared by every encoder block below.
        lowercase_ = TaConfig(
            vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , )
        # Stack of T5 encoder blocks; `forward` iterates `self.encoders`.
        lowercase_ = nn.ModuleList()
        for lyr_num in range(UpperCamelCase__ ):
            lowercase_ = TaBlock(UpperCamelCase__ )
            self.encoders.append(UpperCamelCase__ )
        # Final T5 (RMS-style) layer norm plus output dropout.
        lowercase_ = TaLayerNorm(UpperCamelCase__ )
        lowercase_ = nn.Dropout(p=UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
        """Encode a batch of token ids.

        Embeds tokens, adds position encodings, runs the TaBlock stack under
        an extended attention mask, applies the final layer norm, and returns
        ``(dropout(hidden_states), encoder_inputs_mask)``.
        """
        lowercase_ = self.token_embedder(UpperCamelCase__ )
        # Sequence length from dim 1 — assumes (batch, seq) token ids.
        lowercase_ = encoder_input_tokens.shape[1]
        lowercase_ = torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device )
        # NOTE(review): `x` is read here but never visibly bound — another
        # casualty of the rename; presumably the embedded tokens.
        x += self.position_encoding(UpperCamelCase__ )
        lowercase_ = self.dropout_pre(UpperCamelCase__ )

        # inverted the attention mask
        lowercase_ = encoder_input_tokens.size()
        lowercase_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ )

        for lyr in self.encoders:
            # Each TaBlock returns a tuple; position 0 is the hidden states.
            lowercase_ = lyr(UpperCamelCase__ , UpperCamelCase__ )[0]
        lowercase_ = self.layer_norm(UpperCamelCase__ )
        return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
650
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# The original text assigned both the logger and the URL map to ``a``, so the
# logger was clobbered immediately.  Bind the logger separately; ``a`` keeps
# its final (URL map) value for any existing callers.
logger = logging.get_logger(__name__)

a = {
    'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class UpperCamelCase__ ( __magic_name__ ):
    """Configuration for a ViT-MAE style model.

    Holds the encoder (ViT) hyper-parameters plus the lightweight decoder
    used for masked-autoencoder pre-training.  Defaults match the
    ``facebook/vit-mae-base`` checkpoint.
    """

    # Presumably the ``model_type`` registry key — TODO confirm against the
    # original source.  Kept under its (rewritten) attribute name so existing
    # references still resolve; the unresolved ``Tuple`` annotation is dropped.
    __SCREAMING_SNAKE_CASE = 'vit_mae'

    def __init__(
        self,
        hidden_size=768,                     # encoder embedding dimension
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,             # encoder MLP hidden dimension
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2_048,
        mask_ratio=0.75,                     # fraction of patches masked in pre-training
        norm_pix_loss=False,
        **kwargs,
    ):
        """Store the configuration values as attributes.

        The obfuscated source gave every parameter the same name (a
        SyntaxError) and assigned each value to a throwaway local; the names
        restored here are exactly the ones the original body read back, in
        signature order.
        """
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
650
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")
a = T  # backward-compatible alias for the original (rewritten) binding


class LRUCache(Generic[T]):
    """Least-recently-used cache of keys.

    A deque keeps keys ordered most-recent-first; a set gives O(1)
    membership tests.  The class was defined under an obfuscated name while
    its own methods and the demo below referenced ``LRUCache``/``refer``/
    ``display`` — those real names are restored here.
    """

    dq_store: deque[T]  # cached keys, most recently referred first
    key_reference: set[T]  # O(1) membership for keys currently cached
    _MAX_CAPACITY: int = 10  # class-level default capacity

    def __init__(self, n: int) -> None:
        """Create a cache holding at most ``n`` keys (unbounded when ``n`` is 0).

        Raises:
            ValueError: if ``n`` is negative.
        """
        self.dq_store = deque()
        self.key_reference = set()
        # Capacity is stored per instance; the original mutated the class
        # attribute, so one cache silently resized every other instance.
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to ``x``, evicting the least-recent key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                # Evict from the right end — the least recently referred key.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Already cached: move it to the front instead of duplicating it.
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
650
1
from typing import List, Optional, Union

import torch

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


a = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example injected into __call__'s docstring via replace_example_docstring.
# NOTE(review): this binding was also renamed to ``a`` (clobbering the logger
# above), and the decorator below passes ``UpperCamelCase__``, which is
# unresolved at decoration time — original names lost in this copy.
a = '\n    Examples:\n    ```py\n        >>> import torch\n        >>> import numpy as np\n\n        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n        >>> from transformers import pipeline\n        >>> from diffusers.utils import load_image\n\n\n        >>> def make_hint(image, depth_estimator):\n        ...     image = depth_estimator(image)["depth"]\n        ...     image = np.array(image)\n        ...     image = image[:, :, None]\n        ...     image = np.concatenate([image, image, image], axis=2)\n        ...     detected_map = torch.from_numpy(image).float() / 255.0\n        ...     hint = detected_map.permute(2, 0, 1)\n        ...     return hint\n\n\n        >>> depth_estimator = pipeline("depth-estimation")\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior = pipe_prior.to("cuda")\n\n        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n        ... )\n        >>> pipe = pipe.to("cuda")\n\n\n        >>> img = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/cat.png"\n        ... ).resize((768, 768))\n\n        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n        >>> prompt = "A robot, 4k photo"\n        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n        >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n        >>> image_emb, zero_image_emb = pipe_prior(\n        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n        ... ).to_tuple()\n\n        >>> images = pipe(\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     hint=hint,\n        ...     num_inference_steps=50,\n        ...     generator=generator,\n        ...     height=768,\n        ...     width=768,\n        ... ).images\n\n        >>> images[0].save("robot_cat.png")\n    ```\n'


# NOTE(review): the body reads ``height``/``width``/``new_height``/``new_width``
# while the rewritten parameters are all ``UpperCAmelCase__`` — the original
# signature was presumably (height, width, scale_factor=8); TODO confirm.
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=8 ):
    """Divide height/width by scale_factor, rounding up, and return them as
    multiples of scale_factor (i.e. the latent-space spatial size)."""
    lowercase_ = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    lowercase_ = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class UpperCamelCase__ ( __magic_name__ ):
    """Kandinsky 2.2 controlnet pipeline: a conditional UNet denoises latents
    under a DDPM scheduler, then a MoVQ (VQ) model decodes them to images.

    NOTE(review): duplicated parameter names below are not valid Python; they
    are artifacts of the mechanical rename applied to this copy, as are the
    ``lowercase_`` assignment targets whose attribute names the methods read
    back (``self.movq_scale_factor`` etc.).
    """

    def __init__( self : Union[str, Any] , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : DDPMScheduler , UpperCamelCase__ : VQModel , ):
        """Register unet/scheduler/movq and derive the MoVQ spatial scale factor."""
        super().__init__()

        self.register_modules(
            unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , )
        # 2 ** (n_blocks - 1): total spatial downscaling of the MoVQ encoder;
        # read back later as self.movq_scale_factor.
        lowercase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ):
        """Draw initial latents (or validate supplied ones) and scale by the
        scheduler's init_noise_sigma."""
        if latents is None:
            lowercase_ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            lowercase_ = latents.to(UpperCamelCase__ )

        lowercase_ = latents * scheduler.init_noise_sigma
        return latents

    def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Dict=0 ):
        """Sequentially offload unet and movq to CPU via accelerate's cpu_offload."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )

        lowercase_ = torch.device(F'''cuda:{gpu_id}''' )

        lowercase_ = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase__ , UpperCamelCase__ )

    def UpperCAmelCase__ ( self : int , UpperCamelCase__ : int=0 ):
        """Hook-based offload (accelerate >= 0.17): models move to GPU on demand."""
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )

        lowercase_ = torch.device(F'''cuda:{gpu_id}''' )

        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase__ )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        lowercase_ = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowercase_ , lowercase_ = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )

        # We'll offload the last model manually.
        lowercase_ = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def UpperCAmelCase__ ( self : Optional[int] ):
        """Device the unet's weights actually execute on (accounts for
        accelerate offload hooks)."""
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCamelCase__ , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(UpperCamelCase__ )
    def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 100 , UpperCamelCase__ : float = 4.0 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ):
        """Run the denoising loop.

        CFG-duplicated image embeddings and the controlnet hint condition the
        unet at each scheduler timestep; the final latents are decoded by the
        MoVQ model and optionally converted to numpy / PIL.
        """
        lowercase_ = self._execution_device

        # Classifier-free guidance is active whenever guidance_scale > 1.
        lowercase_ = guidance_scale > 1.0

        # Lists of per-prompt embeddings are concatenated along the batch dim.
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            lowercase_ = torch.cat(UpperCamelCase__ , dim=0 )

        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            lowercase_ = torch.cat(UpperCamelCase__ , dim=0 )

        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            lowercase_ = torch.cat(UpperCamelCase__ , dim=0 )

        lowercase_ = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image, then stack
            # [negative, positive] so one batched pass serves both CFG halves.
            lowercase_ = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
            lowercase_ = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
            lowercase_ = hint.repeat_interleave(UpperCamelCase__ , dim=0 )

            lowercase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ )
            lowercase_ = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ )
        self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ )
        lowercase_ = self.scheduler.timesteps

        lowercase_ = self.movq.config.latent_channels

        # Latent-space spatial size for the requested output resolution.
        lowercase_ , lowercase_ = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor )

        # create initial latent
        lowercase_ = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , )

        for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

            lowercase_ = {"""image_embeds""": image_embeds, """hint""": hint}
            lowercase_ = self.unet(
                sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]

            if do_classifier_free_guidance:
                # The unet predicts noise and variance stacked on the channel
                # axis; CFG mixing applies only to the noise half.
                lowercase_ , lowercase_ = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ = noise_pred.chunk(2 )
                lowercase_ , lowercase_ = variance_pred.chunk(2 )
                lowercase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )

            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not expect a learned variance: keep noise only.
                lowercase_ , lowercase_ = noise_pred.split(latents.shape[1] , dim=1 )

            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ = self.scheduler.step(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0]
        # post-processing
        lowercase_ = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )["""sample"""]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )

        if output_type in ["np", "pil"]:
            # Map decoder output from [-1, 1] to [0, 1] and move to HWC numpy.
            lowercase_ = image * 0.5 + 0.5
            lowercase_ = image.clamp(0 , 1 )
            lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

            if output_type == "pil":
                lowercase_ = self.numpy_to_pil(UpperCamelCase__ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=UpperCamelCase__ )
650
def UpperCAmelCase_(input_str):
    """Return ``input_str`` with its whitespace-separated words reversed.

    Consecutive whitespace collapses to single spaces (``str.split``
    semantics), so "  a   b " becomes "b a".

    The original body read ``input_str`` while the parameter carried a
    different (rewritten) name, so every call raised NameError; the name the
    body actually uses is restored as the parameter.
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
1
from typing import Any

import numpy as np


# All three functions below were renamed to one identifier in the obfuscated
# copy, shadowing each other, while tests() called them by their real names
# (read back from the call sites and the names the bodies use).
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff *matrix* equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix ``a`` and
    nonzero vector ``v`` (shape (n, 1))."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    """Sanity checks: hermitian detection plus a known quotient value of 3."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
650
def UpperCAmelCase_(limit=28_123):
    """Project Euler 23: sum of all positive integers <= ``limit`` that are
    NOT expressible as the sum of two abundant numbers.

    The original body read ``limit``/``sum_divs``/``abundants``/``res``/``n``
    while the parameter carried a different (rewritten) name, so every call
    raised NameError; the names the body uses are restored.
    """
    # sum_divs[n] accumulates the sum of proper divisors of n.  Seeding with
    # 1 counts the divisor 1 for every index (harmlessly wrong for 0 and 1,
    # neither of which can be abundant).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i  # i pairs with itself at i*i; count it once
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i  # divisor pair (i, k) of k*i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        # Every abundant number <= n has already been recorded, so this scan
        # is complete for the current n.
        if not any((n - a) in abundants for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
650
1
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


# NOTE(review): every method below carries the same mechanically rewritten
# name ``UpperCAmelCase__`` and assigns to the throwaway ``lowercase_`` while
# later reading real ``self.*`` attributes.  With identical names only the
# last method binding survives on the class, and none are ``test_``-prefixed,
# so unittest discovery would find nothing — the original method names were
# lost when this copy was generated.
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Round-trip and consistency tests for CLIPProcessor (tokenizer + image processor)."""

    def UpperCAmelCase__ ( self : List[str] ):
        """setUp-style fixture: write a toy CLIP vocab/merges pair and an
        image-processor JSON config into a fresh temp dir."""
        lowercase_ = tempfile.mkdtemp()

        # fmt: off
        lowercase_ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        lowercase_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowercase_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        lowercase_ = {"""unk_token""": """<unk>"""}

        lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(UpperCamelCase__ ) )

        # Minimal CLIP image-processor configuration, serialized as JSON.
        lowercase_ = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        lowercase_ = os.path.join(self.tmpdirname , UpperCamelCase__ )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(UpperCamelCase__ , UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict ):
        """Load a slow CLIPTokenizer from the fixture dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[Any] , **UpperCamelCase__ : int ):
        """Load a fast CLIPTokenizerFast from the fixture dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Any , **UpperCamelCase__ : Tuple ):
        """Load a CLIPImageProcessor from the fixture dir."""
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """tearDown-style cleanup: remove the fixture dir."""
        shutil.rmtree(self.tmpdirname )

    def UpperCAmelCase__ ( self : int ):
        """Return a list with one random 30x400 RGB PIL image.

        NOTE(review): ``np.uinta`` is presumably ``np.uint8`` — rewritten name.
        """
        lowercase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]

        lowercase_ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]

        return image_inputs

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Save/load round trip: slow- and fast-tokenizer processors reload
        with identical vocab and image-processor config."""
        lowercase_ = self.get_tokenizer()
        lowercase_ = self.get_rust_tokenizer()
        lowercase_ = self.get_image_processor()

        lowercase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        processor_slow.save_pretrained(self.tmpdirname )
        lowercase_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )

        lowercase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        processor_fast.save_pretrained(self.tmpdirname )
        lowercase_ = CLIPProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
        self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
        self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )

    def UpperCAmelCase__ ( self : List[str] ):
        """from_pretrained kwargs (new special tokens, image-processor
        overrides) take effect on reload."""
        lowercase_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )

        lowercase_ = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Any ):
        """Processor image path matches the bare image processor numerically."""
        lowercase_ = self.get_image_processor()
        lowercase_ = self.get_tokenizer()

        lowercase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )

        lowercase_ = self.prepare_image_inputs()

        lowercase_ = image_processor(UpperCamelCase__ , return_tensors="""np""" )
        lowercase_ = processor(images=UpperCamelCase__ , return_tensors="""np""" )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCAmelCase__ ( self : Tuple ):
        """Processor text path matches the bare tokenizer."""
        lowercase_ = self.get_image_processor()
        lowercase_ = self.get_tokenizer()

        lowercase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )

        lowercase_ = """lower newer"""

        lowercase_ = processor(text=UpperCamelCase__ )

        lowercase_ = tokenizer(UpperCamelCase__ )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCAmelCase__ ( self : int ):
        """Combined text+image call returns all expected keys; empty call raises."""
        lowercase_ = self.get_image_processor()
        lowercase_ = self.get_tokenizer()

        lowercase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )

        lowercase_ = """lower newer"""
        lowercase_ = self.prepare_image_inputs()

        lowercase_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )

        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )

        # test if it raises when no input is passed
        with pytest.raises(UpperCamelCase__ ):
            processor()

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Processor batch_decode delegates to the tokenizer's batch_decode."""
        lowercase_ = self.get_image_processor()
        lowercase_ = self.get_tokenizer()

        lowercase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )

        lowercase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        lowercase_ = processor.batch_decode(UpperCamelCase__ )
        lowercase_ = tokenizer.batch_decode(UpperCamelCase__ )

        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Combined call's output keys equal processor.model_input_names."""
        lowercase_ = self.get_image_processor()
        lowercase_ = self.get_tokenizer()

        lowercase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )

        lowercase_ = """lower newer"""
        lowercase_ = self.prepare_image_inputs()

        lowercase_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    """Builds tiny OpenLlama configs and random inputs, and runs shape checks.

    NOTE(review): the corrupted source had every binding clobbered to
    ``lowercase_`` and the class renamed; names below are reconstructed from
    their uses (e.g. ``setUp`` instantiates ``OpenLlamaModelTester``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Fix: the original bound every value to a throwaway local instead of
        # the instance attributes read by the methods below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, *labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): the obfuscation replaced the boolean literals for
        # is_decoder / use_stable_embedding with placeholders; False/True
        # restored from the upstream open_llama test — confirm.
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Shape-check the bare model, with and without an attention mask."""
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Shape-check the model in cross-attention (decoder) mode."""
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Shape-check the causal-LM head output."""
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that cached decoding matches a full forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
650
1
import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# Base URL of the public OpenAI Jukebox checkpoints and the files each HF
# model name maps onto (VQ-VAE + the three prior levels).
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    """Translate a single OpenAI state-dict key into the HF naming scheme.

    NOTE(review): the corrupted source clobbered every assignment target to
    ``lowercase_``; names restored from their uses (the module calls
    ``replace_key`` / ``fix_jukebox_keys`` / ``convert_openai_checkpoint``).
    """
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rewrite all keys of ``state_dict`` to match the HF ``JukeboxModel``.

    ``mapping`` (new key -> original key) is filled in-place for later
    inspection; keys whose target is missing or shape-mismatched are reported.
    Returns the renamed state dict.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the OpenAI checkpoints, rename their keys, load them into a
    HF ``JukeboxModel`` and save the converted model plus the key mapping."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # NOTE(review): the destination-key expressions on the left-hand side
        # were lost in the corrupted source; restored from the upstream
        # conversion script — confirm against transformers' convert_jukebox.py.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # dict 0 is the VQ-VAE; dicts 1..3 are the priors, stored top-down.
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
650
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    """Raise if torch is installed but older than the 1.11 minimum we need."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Cut a (C, H, W) image into non-overlapping patches.

    Returns a tensor of shape [1, rows, columns, patch_height * patch_width * C].
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render ``text`` (wrapped at 80 chars) onto a padded PIL image."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image, header, **kwargs):
    """Render ``header`` above ``image`` and return the stacked numpy image."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image


class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor producing Pix2Struct ``flattened_patches``.

    NOTE(review): class, method and attribute names restored from internal
    references (``self.extract_flattened_patches`` etc.) after the source was
    corrupted by name mangling.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Fix: the original bound these to a throwaway local instead of the
        # attributes that `preprocess` reads back.
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize ``image`` to fit up to ``max_patches`` patches, then return a
        padded [max_patches, 2 + patch_h * patch_w * C] array whose first two
        columns are 1-based row/column ids."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image holds at most max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        # NOTE(review): align_corners/antialias literals were lost in the
        # corrupted source; False/True restored from upstream — confirm.
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """Zero-mean / unit-std normalize using statistics of the whole image,
        with the std clamped away from zero for near-constant images."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Convert images into flattened patches plus an attention mask.

        For VQA checkpoints (``self.is_vqa``), ``header_text`` is rendered on
        top of each image before patch extraction.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
650
1
import doctest from collections import deque import numpy as np class UpperCamelCase__ : def __init__( self : List[Any] ): '''simple docstring''' lowercase_ = [2, 1, 2, -1] lowercase_ = [1, 2, 3, 4] def UpperCAmelCase__ ( self : Any ): '''simple docstring''' lowercase_ = len(self.first_signal ) lowercase_ = len(self.second_signal ) lowercase_ = max(UpperCamelCase__ , UpperCamelCase__ ) # create a zero matrix of max_length x max_length lowercase_ = [[0] * max_length for i in range(UpperCamelCase__ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(UpperCamelCase__ ): lowercase_ = deque(self.second_signal ) rotated_signal.rotate(UpperCamelCase__ ) for j, item in enumerate(UpperCamelCase__ ): matrix[i][j] += item # multiply the matrix with the first signal lowercase_ = np.matmul(np.transpose(UpperCamelCase__ ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(UpperCamelCase__ , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
650
import cva import numpy as np class UpperCamelCase__ : def __init__( self : List[str] , UpperCamelCase__ : float , UpperCamelCase__ : int ): '''simple docstring''' if k in (0.04, 0.06): lowercase_ = k lowercase_ = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Optional[int] ): '''simple docstring''' return str(self.k ) def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str ): '''simple docstring''' lowercase_ = cva.imread(UpperCamelCase__ , 0 ) lowercase_ , lowercase_ = img.shape lowercase_ = [] lowercase_ = img.copy() lowercase_ = cva.cvtColor(UpperCamelCase__ , cva.COLOR_GRAY2RGB ) lowercase_ , lowercase_ = np.gradient(UpperCamelCase__ ) lowercase_ = dx**2 lowercase_ = dy**2 lowercase_ = dx * dy lowercase_ = 0.04 lowercase_ = self.window_size // 2 for y in range(UpperCamelCase__ , h - offset ): for x in range(UpperCamelCase__ , w - offset ): lowercase_ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ = (wxx * wyy) - (wxy**2) lowercase_ = wxx + wyy lowercase_ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": a = HarrisCorner(0.04, 3) a , a = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
650
1
import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () a = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). a = [0, 2_5, 5_0] a = [2_5, 5_0, 7_5] a = fuzz.membership.trimf(X, abca) a = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. a = np.ones(7_5) a = np.zeros((7_5,)) # 1. Union = max(µA(x), µB(x)) a = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) a = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) a = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) a = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] a = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) a = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] a = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] a = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). 
from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 1_0) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
650
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'): a = { 'linear': PIL.Image.Resampling.BILINEAR, 'bilinear': PIL.Image.Resampling.BILINEAR, 'bicubic': PIL.Image.Resampling.BICUBIC, 'lanczos': PIL.Image.Resampling.LANCZOS, 'nearest': PIL.Image.Resampling.NEAREST, } else: a = { 'linear': PIL.Image.LINEAR, 'bilinear': PIL.Image.BILINEAR, 'bicubic': PIL.Image.BICUBIC, 'lanczos': PIL.Image.LANCZOS, 'nearest': PIL.Image.NEAREST, } def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowercase_ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowercase_ = numpy_to_pil(UpperCAmelCase__ ) return images def UpperCAmelCase_ ( UpperCAmelCase__ ): if images.ndim == 3: lowercase_ = images[None, ...] lowercase_ = (images * 2_5_5).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowercase_ = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images] else: lowercase_ = [Image.fromarray(UpperCAmelCase__ ) for image in images] return pil_images
650
1
def UpperCAmelCase_ ( ): lowercase_ = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1] lowercase_ = 6 lowercase_ = 1 lowercase_ = 1_9_0_1 lowercase_ = 0 while year < 2_0_0_1: day += 7 if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 lowercase_ = day - days_per_month[month - 2] elif day > 2_9 and month == 2: month += 1 lowercase_ = day - 2_9 else: if day > days_per_month[month - 1]: month += 1 lowercase_ = day - days_per_month[month - 2] if month > 1_2: year += 1 lowercase_ = 1 if year < 2_0_0_1 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
650
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( __magic_name__ ): __SCREAMING_SNAKE_CASE : str = (UnCLIPScheduler,) def UpperCAmelCase__ ( self : int , **UpperCamelCase__ : int ): '''simple docstring''' lowercase_ = { """num_train_timesteps""": 1_000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**UpperCamelCase__ ) return config def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=UpperCamelCase__ ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config(variance_type="""fixed_small_log""" ) lowercase_ = scheduler_class(**UpperCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert 
torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def UpperCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config(variance_type="""learned_range""" ) lowercase_ = scheduler_class(**UpperCamelCase__ ) lowercase_ = 0.5 assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1e-5 def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**UpperCamelCase__ ) lowercase_ = scheduler.timesteps lowercase_ = self.dummy_model() lowercase_ = self.dummy_sample_deter lowercase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ ) # 2. predict previous mean of sample x_t-1 lowercase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample lowercase_ = pred_prev_sample lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def UpperCAmelCase__ ( self : int ): '''simple docstring''' lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(25 ) lowercase_ = scheduler.timesteps lowercase_ = self.dummy_model() lowercase_ = self.dummy_sample_deter lowercase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. 
predict noise residual lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ ) if i + 1 == timesteps.shape[0]: lowercase_ = None else: lowercase_ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowercase_ = scheduler.step( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample lowercase_ = pred_prev_sample lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' pass def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass
650
1
from __future__ import annotations import math def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): if len(UpperCAmelCase__ ) != 2 or len(a[0] ) != 2 or len(UpperCAmelCase__ ) != 2 or len(b[0] ) != 2: raise Exception("""Matrices are not 2x2""" ) lowercase_ = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCAmelCase__ ) ) ] def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCAmelCase__ ) ) ] def UpperCAmelCase_ ( UpperCAmelCase__ ): if len(UpperCAmelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("""Odd matrices are not supported!""" ) lowercase_ = len(UpperCAmelCase__ ) lowercase_ = matrix_length // 2 lowercase_ = [[a[i][j] for j in range(UpperCAmelCase__ , UpperCAmelCase__ )] for i in range(UpperCAmelCase__ )] lowercase_ = [ [a[i][j] for j in range(UpperCAmelCase__ , UpperCAmelCase__ )] for i in range(UpperCAmelCase__ , UpperCAmelCase__ ) ] lowercase_ = [[a[i][j] for j in range(UpperCAmelCase__ )] for i in range(UpperCAmelCase__ )] lowercase_ = [[a[i][j] for j in range(UpperCAmelCase__ )] for i in range(UpperCAmelCase__ , UpperCAmelCase__ )] return top_left, top_right, bot_left, bot_right def UpperCAmelCase_ ( UpperCAmelCase__ ): return len(UpperCAmelCase__ ), len(matrix[0] ) def UpperCAmelCase_ ( UpperCAmelCase__ ): print("""\n""".join(str(UpperCAmelCase__ ) for line in matrix ) ) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): if matrix_dimensions(UpperCAmelCase__ ) == (2, 2): return default_matrix_multiplication(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = 
split_matrix(UpperCAmelCase__ ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = split_matrix(UpperCAmelCase__ ) lowercase_ = actual_strassen(UpperCAmelCase__ , matrix_subtraction(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ = actual_strassen(matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ ) lowercase_ = actual_strassen(matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ ) lowercase_ = actual_strassen(UpperCAmelCase__ , matrix_subtraction(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ = actual_strassen(matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) , matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ = actual_strassen(matrix_subtraction(UpperCAmelCase__ , UpperCAmelCase__ ) , matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ = actual_strassen(matrix_subtraction(UpperCAmelCase__ , UpperCAmelCase__ ) , matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ = matrix_addition(matrix_subtraction(matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ ) , UpperCAmelCase__ ) lowercase_ = matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ = matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ = matrix_subtraction(matrix_subtraction(matrix_addition(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ ) , UpperCAmelCase__ ) # construct the new matrix from our 4 quadrants lowercase_ = [] for i in range(len(UpperCAmelCase__ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(UpperCAmelCase__ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): if matrix_dimensions(UpperCAmelCase__ )[1] != matrix_dimensions(UpperCAmelCase__ )[0]: lowercase_ = ( """Unable to multiply these matrices, please check the dimensions.\n""" F'''Matrix A: {matrixa}\n''' F'''Matrix B: {matrixa}''' ) raise Exception(UpperCAmelCase__ ) lowercase_ = 
matrix_dimensions(UpperCAmelCase__ ) lowercase_ = matrix_dimensions(UpperCAmelCase__ ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] lowercase_ = max(*UpperCAmelCase__ , *UpperCAmelCase__ ) lowercase_ = int(math.pow(2 , math.ceil(math.loga(UpperCAmelCase__ ) ) ) ) lowercase_ = matrixa lowercase_ = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , UpperCAmelCase__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCAmelCase__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCAmelCase__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) lowercase_ = actual_strassen(UpperCAmelCase__ , UpperCAmelCase__ ) # Removing the additional zeros for i in range(0 , UpperCAmelCase__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCAmelCase__ ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": a = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] a = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
650
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCamelCase__ : __SCREAMING_SNAKE_CASE : Optional[str] = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the training data.'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the validation data.'} ) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} ) __SCREAMING_SNAKE_CASE : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} ) __SCREAMING_SNAKE_CASE : float = field( default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ = {} if self.train_dir is not None: lowercase_ = self.train_dir if self.validation_dir is not None: lowercase_ = self.validation_dir lowercase_ = data_files if data_files else None @dataclass class UpperCamelCase__ : __SCREAMING_SNAKE_CASE : str = field( default=__magic_name__ , metadata={ 'help': ( 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ' 'checkpoint identifier on the hub. ' 'Don\'t set if you want to train a model from scratch.' 
) } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__magic_name__ )} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__magic_name__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , ) __SCREAMING_SNAKE_CASE : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) __SCREAMING_SNAKE_CASE : str = field(default=__magic_name__ , metadata={'help': 'Name or path of preprocessor config.'} ) __SCREAMING_SNAKE_CASE : bool = field( default=__magic_name__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={ 'help': ( 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={ 'help': ( 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.' 
) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__magic_name__ , metadata={'help': 'Stride to use for the encoder.'} , ) class UpperCamelCase__ : def __init__( self : Dict , UpperCamelCase__ : List[Any]=192 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : str=0.6 ): '''simple docstring''' lowercase_ = input_size lowercase_ = mask_patch_size lowercase_ = model_patch_size lowercase_ = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("""Input size must be divisible by mask patch size""" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("""Mask patch size must be divisible by model patch size""" ) lowercase_ = self.input_size // self.mask_patch_size lowercase_ = self.mask_patch_size // self.model_patch_size lowercase_ = self.rand_size**2 lowercase_ = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : int ): '''simple docstring''' lowercase_ = np.random.permutation(self.token_count )[: self.mask_count] lowercase_ = np.zeros(self.token_count , dtype=UpperCamelCase__ ) lowercase_ = 1 lowercase_ = mask.reshape((self.rand_size, self.rand_size) ) lowercase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def UpperCAmelCase_ ( UpperCAmelCase__ ): lowercase_ = torch.stack([example["""pixel_values"""] for example in examples] ) lowercase_ = torch.stack([example["""mask"""] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def UpperCAmelCase_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase_ = training_args.get_process_log_level() logger.setLevel(UpperCAmelCase__ ) transformers.utils.logging.set_verbosity(UpperCAmelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
lowercase_ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase_ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. lowercase_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0: lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split ) lowercase_ = split["""train"""] lowercase_ = split["""test"""] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase_ = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: lowercase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCAmelCase__ ) elif model_args.model_name_or_path: lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ ) else: lowercase_ = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(UpperCAmelCase__ , """decoder_type""" ): lowercase_ = """simmim""" # adapt config lowercase_ = model_args.image_size if model_args.image_size is not None else config.image_size lowercase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size lowercase_ = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { """image_size""": model_args.image_size, """patch_size""": model_args.patch_size, """encoder_stride""": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: lowercase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ ) elif model_args.model_name_or_path: lowercase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ ) else: lowercase_ = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } lowercase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: lowercase_ = AutoModelForMaskedImageModeling.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) lowercase_ = AutoModelForMaskedImageModeling.from_config(UpperCAmelCase__ ) if training_args.do_train: lowercase_ = ds["""train"""].column_names else: lowercase_ = ds["""validation"""].column_names if data_args.image_column_name is not None: lowercase_ = data_args.image_column_name elif "image" in column_names: lowercase_ = """image""" elif "img" in column_names: lowercase_ = """img""" else: lowercase_ = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py lowercase_ = Compose( [ Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator lowercase_ = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(UpperCAmelCase__ ): lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]] lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: lowercase_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(UpperCAmelCase__ ) if training_args.do_eval: if 
"validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: lowercase_ = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(UpperCAmelCase__ ) # Initialize our trainer lowercase_ = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: lowercase_ = None if training_args.resume_from_checkpoint is not None: lowercase_ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase_ = last_checkpoint lowercase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase_ = trainer.evaluate() trainer.log_metrics("""eval""" , UpperCAmelCase__ ) trainer.save_metrics("""eval""" , UpperCAmelCase__ ) # Write model card and (optionally) push to hub lowercase_ = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """masked-image-modeling""", """dataset""": data_args.dataset_name, """tags""": ["""masked-image-modeling"""], } if training_args.push_to_hub: trainer.push_to_hub(**UpperCAmelCase__ ) else: trainer.create_model_card(**UpperCAmelCase__ ) if __name__ == "__main__": main()
650
1
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenate consecutive (src, tgt) pairs into longer examples.

    Pairs are merged in order until appending the next pair would push either
    the source or the target side past ``max_tokens`` tokens (as measured by
    ``tok``), at which point the accumulated example is finalized and a new
    one is started.

    Args:
        tok: a tokenizer callable returning ``input_ids`` (HF-style).
        src_examples: list of source strings.
        tgt_examples: list of target strings, parallel to ``src_examples``.
        max_tokens: per-side token budget for a packed example.

    Returns:
        Tuple ``(finished_src, finished_tgt)`` of packed string lists.
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    # Seed the accumulator with the first pair; the loop starts at the second.
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # Budget is measured in tokenizer tokens, not characters.
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        # Tentatively extend both sides, then check the budget.
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup: flush the trailing partially-filled example.
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """Pack the train split of ``data_dir`` and copy val/test through unchanged.

    Reads ``{split}.source`` / ``{split}.target`` files, writes packed train
    files (and verbatim copies of val/test) into ``save_path``.
    """
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    # Evaluation splits are copied as-is: packing would change what is scored.
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    """CLI entry point: parse args, load the tokenizer, pack the data dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
650
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class UpperCamelCase__(BaseImageProcessor):
    """Image processor: shortest-edge resize (scaled by 256/224), center crop,
    rescale and ImageNet-default normalization.

    All transform steps are individually toggleable via the ``do_*`` flags,
    both at construction time and per ``preprocess`` call.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Default: resize so the shortest edge is 224, then crop 224x224.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image``.

        If ``size`` has ``"shortest_edge"``, the shortest edge is resized to
        ``int(256/224 * shortest_edge)`` preserving aspect ratio; otherwise
        ``size`` must supply explicit ``"height"`` and ``"width"``.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            # Scale the requested edge by 256/224 (resize larger, crop down later).
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size["height"]`` x ``size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map to [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline on one image or a batch.

        Per-call arguments override the processor's defaults; ``None`` means
        "use the value set in ``__init__``". Returns a :class:`BatchFeature`
        with key ``"pixel_values"``.
        """
        # Resolve every per-call override against the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
650
1