code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCAmelCase: Dict = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase: Optional[Any] = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase: int = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys _lowerCAmelCase: str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
20
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): 
"""simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
74
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class __A ( UpperCamelCase__ ): UpperCamelCase = """blenderbot-small""" UpperCamelCase = ["""past_key_values"""] UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self :Any , __snake_case :Optional[int]=5_02_65 , __snake_case :Any=5_12 , __snake_case :Tuple=8 , __snake_case :Optional[Any]=20_48 , __snake_case :List[Any]=16 , __snake_case :Any=8 , __snake_case :Union[str, Any]=20_48 , __snake_case :Any=16 , __snake_case :List[str]=0.0 , __snake_case :Dict=0.0 , __snake_case :str=True , __snake_case :Optional[int]=True , __snake_case :Optional[int]="gelu" , __snake_case :Dict=5_12 , __snake_case :Optional[Any]=0.1 , __snake_case :Tuple=0.0 , __snake_case :Optional[Any]=0.0 , __snake_case :Optional[int]=0.02 , __snake_case :Optional[int]=1 , __snake_case :str=False , __snake_case :List[Any]=0 , __snake_case :int=1 , __snake_case :List[Any]=2 , __snake_case :Optional[Any]=2 , **__snake_case :str , ): '''simple docstring''' __magic_name__ : int =vocab_size __magic_name__ : Optional[Any] =max_position_embeddings __magic_name__ : Optional[Any] =d_model __magic_name__ : str =encoder_ffn_dim __magic_name__ : Tuple =encoder_layers __magic_name__ : List[str] =encoder_attention_heads __magic_name__ : Union[str, Any] =decoder_ffn_dim 
__magic_name__ : int =decoder_layers __magic_name__ : Tuple =decoder_attention_heads __magic_name__ : Tuple =dropout __magic_name__ : List[str] =attention_dropout __magic_name__ : int =activation_dropout __magic_name__ : Union[str, Any] =activation_function __magic_name__ : Any =init_std __magic_name__ : Any =encoder_layerdrop __magic_name__ : Optional[int] =decoder_layerdrop __magic_name__ : int =use_cache __magic_name__ : Dict =encoder_layers __magic_name__ : Tuple =scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , ) class __A ( UpperCamelCase__ ): @property def A__ ( self :Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __magic_name__ : List[Any] =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __magic_name__ : Tuple ={0: """batch"""} __magic_name__ : Tuple ={0: """batch""", 1: """past_decoder_sequence + sequence"""} else: __magic_name__ : Optional[Any] ={0: """batch""", 1: """decoder_sequence"""} __magic_name__ : Union[str, Any] ={0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
__magic_name__ : Optional[Any] =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __magic_name__ , __magic_name__ : List[Any] =self.num_layers for i in range(__snake_case ): __magic_name__ : Optional[int] ={0: """batch""", 2: """past_sequence + sequence"""} __magic_name__ : str ={0: """batch""", 2: """past_sequence + sequence"""} else: __magic_name__ : Dict =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def A__ ( self :Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __magic_name__ : List[Any] =super().outputs else: __magic_name__ : Optional[Any] =super(__snake_case , self ).outputs if self.use_past: __magic_name__ , __magic_name__ : int =self.num_layers for i in range(__snake_case ): __magic_name__ : Optional[int] ={0: """batch""", 2: """past_sequence + sequence"""} __magic_name__ : str ={0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def A__ ( self :List[Any] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ): '''simple docstring''' __magic_name__ : str =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Generate decoder inputs __magic_name__ : List[str] =seq_length if not self.use_past else 1 __magic_name__ : Any =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) __magic_name__ : Any 
={f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} __magic_name__ : List[str] =dict(**__snake_case , **__snake_case ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __magic_name__ , __magic_name__ : int =common_inputs["""input_ids"""].shape __magic_name__ : Any =common_inputs["""decoder_input_ids"""].shape[1] __magic_name__ , __magic_name__ : Optional[int] =self.num_attention_heads __magic_name__ : int =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __magic_name__ : List[Any] =decoder_seq_length + 3 __magic_name__ : List[Any] =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __magic_name__ : Any =torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(__snake_case , __snake_case )] , dim=1 ) __magic_name__ : Optional[Any] =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered __magic_name__ , __magic_name__ : Dict =self.num_layers __magic_name__ : List[Any] =min(__snake_case , __snake_case ) __magic_name__ : List[Any] =max(__snake_case , __snake_case ) - min_num_layers __magic_name__ : Optional[Any] ="""encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(__snake_case ): common_inputs["past_key_values"].append( ( torch.zeros(__snake_case ), torch.zeros(__snake_case ), torch.zeros(__snake_case ), torch.zeros(__snake_case ), ) ) # TODO: test this. 
__magic_name__ : int =encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(__snake_case , __snake_case ): common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) ) return common_inputs def A__ ( self :List[str] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ): '''simple docstring''' __magic_name__ : Optional[Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __magic_name__ , __magic_name__ : int =common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __magic_name__ : int =seqlen + 2 __magic_name__ , __magic_name__ : Tuple =self.num_layers __magic_name__ , __magic_name__ : List[str] =self.num_attention_heads __magic_name__ : List[Any] =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __magic_name__ : Union[str, Any] =common_inputs["""attention_mask"""].dtype __magic_name__ : int =torch.cat( [common_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 ) __magic_name__ : Dict =[ (torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case ) ] return common_inputs def A__ ( self :Any , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ): '''simple docstring''' __magic_name__ : Tuple =compute_effective_axis_dimension( __snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a 
fixed dimension of 8 tokens to avoid optimizations made by ONNX __magic_name__ : Any =tokenizer.num_special_tokens_to_add(__snake_case ) __magic_name__ : Optional[Any] =compute_effective_axis_dimension( __snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case ) # Generate dummy inputs according to compute batch and sequence __magic_name__ : List[str] =[""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size __magic_name__ : int =dict(tokenizer(__snake_case , return_tensors=__snake_case ) ) return common_inputs def A__ ( self :Optional[Any] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __magic_name__ : Any =self._generate_dummy_inputs_for_default_and_seqaseq_lm( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) elif self.task == "causal-lm": __magic_name__ : List[Any] =self._generate_dummy_inputs_for_causal_lm( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) else: __magic_name__ : int =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) return common_inputs def A__ ( self :List[str] , __snake_case :Any , __snake_case :Dict , __snake_case :Any , __snake_case :Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __magic_name__ : Union[str, Any] =super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case ) else: __magic_name__ : Optional[Any] =super(__snake_case , self )._flatten_past_key_values_( __snake_case , __snake_case , __snake_case , __snake_case )
21
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( 
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _snake_case : List[str] = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Union[str, Any] = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys _snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
22
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', 
'''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', 
'''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) 
self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP 
sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = 
tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
74
0
"""SentencePiece tokenizer for BertGeneration checkpoints."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class _a(PreTrainedTokenizer):
    """Tokenizer backed by a SentencePiece model (``spiece.model``).

    Loads the model given by ``vocab_file`` and exposes the standard
    ``PreTrainedTokenizer`` interface (tokenize, convert ids/tokens,
    save_vocabulary, pickling support).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model at ``vocab_file`` and register the special tokens."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor itself is not picklable: drop it from the
        # state and rebuild it from `vocab_file` in `__setstate__`.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility with pickles created before sp_model_kwargs existed
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) into an id using the SentencePiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) into a token (str) using the SentencePiece vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model file into ``save_directory``.

        Returns a 1-tuple with the written path, or None if ``save_directory``
        is not a directory (an error is logged in that case).
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No physical file to copy: dump the in-memory model proto instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
23
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , *_A : Optional[int] , **_A : Tuple ): """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' , _A , ) super().__init__(*_A , **_A )
74
0
"""Tests for the Pix2Struct image processor (flattened-patch extraction)."""
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # Fallback so the module-level skip decorators below can still evaluate.
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import PixaStructImageProcessor


class PixaStructImageProcessingTester(unittest.TestCase):
    """Builds the processor kwargs and dummy inputs used by the test classes below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # max_patches values exercised by every shape test below
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        # Fixed reference image used by the mean-value regression test.
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        # Regression check on the mean of the flattened patches of a fixed image.
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch carries its (row, col) position, hence the +2.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        # In VQA mode a header text must be rendered into the image.
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Calling without header_text must fail in VQA mode
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Same checks with 4-channel (RGBA) input; output is still 3-channel."""

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # The alpha channel is dropped on conversion, hence num_channels - 1.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
24
"""Packaged Parquet dataset builder: streams Arrow record batches from parquet files."""
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    # Number of rows per yielded Arrow table.
    batch_size: int = 10_000
    # Optional column projection pushed down to the parquet reader.
    columns: Optional[List[str]] = None
    # Optional explicit features; inferred from the first file's schema when None.
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict data_files and build one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield `(key, table)` pairs, one table per parquet record batch."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # A column projection must match the declared features exactly.
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
74
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
25
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
74
0
"""Nightly integration tests for the ONNX Stable Diffusion inpainting pipeline."""
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        """ONNX Runtime CUDA execution provider shared by the tests below."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        """Session options; memory-pattern optimization is disabled."""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
26
"""Tests for the MobileNetV1 image processor."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Builds the processor kwargs dict and dummy inputs used by the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # kwargs passed to from_dict override the serialized values.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
74
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCAmelCase__ ( self ): _A = 1 _A = 3 _A = (32, 32) _A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case_ ) return image @property def lowerCAmelCase__ ( self ): torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=snake_case_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def lowerCAmelCase__ ( self ): torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def lowerCAmelCase__ ( self ): torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , 
projection_dim=512 , ) return CLIPTextModel(snake_case_ ) def lowerCAmelCase__ ( self ): _A = 'cpu' # ensure determinism for the device-dependent torch.Generator _A = self.dummy_cond_unet_upscale _A = DDPMScheduler() _A = DDIMScheduler(prediction_type='v_prediction' ) _A = self.dummy_vae _A = self.dummy_text_encoder _A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _A = Image.fromarray(np.uinta(snake_case_ ) ).convert('RGB' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _A = StableDiffusionUpscalePipeline( unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=350 , ) _A = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _A = 'A painting of a squirrel eating a burger' _A = torch.Generator(device=snake_case_ ).manual_seed(0 ) _A = sd_pipe( [prompt] , image=snake_case_ , generator=snake_case_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , ) _A = output.images _A = torch.Generator(device=snake_case_ ).manual_seed(0 ) _A = sd_pipe( [prompt] , image=snake_case_ , generator=snake_case_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=snake_case_ , )[0] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] _A = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _A = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase__ ( self ): _A = 'cpu' # ensure determinism for the device-dependent torch.Generator _A = self.dummy_cond_unet_upscale _A = DDPMScheduler() _A = DDIMScheduler(prediction_type='v_prediction' 
) _A = self.dummy_vae _A = self.dummy_text_encoder _A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _A = Image.fromarray(np.uinta(snake_case_ ) ).convert('RGB' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _A = StableDiffusionUpscalePipeline( unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=350 , ) _A = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _A = 'A painting of a squirrel eating a burger' _A = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , ) _A = output.images assert image.shape[0] == 2 _A = torch.Generator(device=snake_case_ ).manual_seed(0 ) _A = sd_pipe( [prompt] , image=snake_case_ , generator=snake_case_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , ) _A = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def lowerCAmelCase__ ( self ): _A = self.dummy_cond_unet_upscale _A = DDPMScheduler() _A = DDIMScheduler(prediction_type='v_prediction' ) _A = self.dummy_vae _A = self.dummy_text_encoder _A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _A = Image.fromarray(np.uinta(snake_case_ ) ).convert('RGB' ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _A = unet.half() _A = text_encoder.half() # make sure here that pndm scheduler skips prk _A = StableDiffusionUpscalePipeline( unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=350 , ) _A = sd_pipe.to(snake_case_ ) 
sd_pipe.set_progress_bar_config(disable=snake_case_ ) _A = 'A painting of a squirrel eating a burger' _A = torch.manual_seed(0 ) _A = sd_pipe( [prompt] , image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type='np' , ).images _A = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self ): _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale' '/upsampled_cat.npy' ) _A = 'stabilityai/stable-diffusion-x4-upscaler' _A = StableDiffusionUpscalePipeline.from_pretrained(snake_case_ ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() _A = 'a cat sitting on a park bench' _A = torch.manual_seed(0 ) _A = pipe( prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , output_type='np' , ) _A = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def lowerCAmelCase__ ( self ): _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale' '/upsampled_cat_fp16.npy' ) _A = 'stabilityai/stable-diffusion-x4-upscaler' _A = StableDiffusionUpscalePipeline.from_pretrained( snake_case_ , torch_dtype=torch.floataa , ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() _A = 'a cat sitting on a park bench' _A = torch.manual_seed(0 ) _A = pipe( 
prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , output_type='np' , ) _A = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def lowerCAmelCase__ ( self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) _A = 'stabilityai/stable-diffusion-x4-upscaler' _A = StableDiffusionUpscalePipeline.from_pretrained( snake_case_ , torch_dtype=torch.floataa , ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _A = 'a cat sitting on a park bench' _A = torch.manual_seed(0 ) _A = pipe( prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , num_inference_steps=5 , output_type='np' , ) _A = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
27
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )] # initialize interval's left pointer and right pointer __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0 for i in range(1 , len(snake_case ) ): # case when current index is inside the interval if i <= right_pointer: __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __SCREAMING_SNAKE_CASE : Dict = min_edge while go_next(snake_case , snake_case , snake_case ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1 return z_result def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]] def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(snake_case ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""Deprecated feature-extractor shim for VideoMAE."""
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


UpperCamelCase_ = logging.get_logger(__name__)


class _a(VideoMAEImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Behaves exactly like ``VideoMAEImageProcessor``; the only difference is
    that construction emits a deprecation warning pointing callers at the
    replacement class.
    """

    def __init__(self, *args, **kwargs):
        # Warn once at construction time; pass FutureWarning as the category
        # (the original second positional argument was lost in mangling).
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
28
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps a submodule name to the list of public names it
# provides. Entries are *added* per optional backend, never rebound wholesale,
# so the configuration mapping survives and `_LazyModule` below resolves.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module so heavy backends are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
74
0
"""simple docstring""" def lowercase ( lowerCAmelCase__ ): if not nums: # Makes sure that the list is not empty raise ValueError('''List is empty''' ) lowerCamelCase_ = sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
29
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in 
name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if 
"prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] 
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', 
'''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == 
"xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to 
push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps a submodule name to the list of public names it
# provides. Entries are *added* per optional backend, never rebound wholesale,
# so the configuration mapping survives and `_LazyModule` below resolves.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module so heavy backends are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
from pathlib import Path


def minify(src_dir, dest_dir, n):
    """Write the first ``n`` lines of every file in ``src_dir`` to ``dest_dir``.

    Each output file keeps its original name; trailing whitespace is stripped
    from the kept lines. ``dest_dir`` is created if it does not exist.

    Args:
        src_dir: directory whose files will be truncated.
        dest_dir: output directory (created with ``exist_ok=True``).
        n: number of leading lines to keep per file.
    """
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        # Keep only the first n lines, each stripped of trailing whitespace.
        # The context manager closes the handle (the original leaked it).
        with path.open() as f:
            new = [x.rstrip() for x in f.readlines()][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.write_text("\n".join(new))


if __name__ == "__main__":
    # Deferred CLI-only third-party import: the module stays importable
    # (and testable) without `fire` installed.
    import fire

    fire.Fire(minify)
74
0
def UpperCAmelCase_(number: int) -> int:
    """Return the 1-based position of the most significant set bit of ``number``.

    Equivalent to ``number.bit_length()`` for non-negative ints; returns 0
    when ``number`` is 0.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative (right-shifting a negative int
            never reaches 0, so the original loop would hang forever).
    """
    if not isinstance(number, int):
        raise TypeError('Input value must be an \'int\' type')
    if number < 0:
        raise ValueError("Input value must be a non-negative 'int'")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
31
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class __UpperCamelCase(unittest.TestCase):
    """Tests for DisjunctiveConstraint.

    The degraded original gave all four methods the same name (so only the
    last survived and none ran under unittest) and referenced an undefined
    `_A`; method names and locals are restored here. `_A` stood for `list`,
    `ValueError`, or a local depending on the call site — recovered from the
    upstream test semantics (DisjunctiveConstraint raises ValueError on bad
    input; `token_ids` is a plain list).
    """

    def test_input_types(self):
        # Constraints must be given as a list of lists of ints.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint may not be a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
74
0
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)

# NOTE(review): the degraded original assigned every mapping to the same
# throwaway name (`UpperCAmelCase_`) and every class to `__UpperCamelCase`,
# while the code below referenced FLAX_MODEL_MAPPING_NAMES, FlaxAutoModel,
# etc. — all undefined (NameError at import). Names are restored from those
# references. The class attribute is restored to `_model_mapping`, the name
# `_BaseAutoModelClass` is expected to read — TODO confirm against
# auto_factory.

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
32
"""Masked-image-modeling (SimMIM-style) pretraining example.

Restored from a degraded source in which every local was assigned to one
shared placeholder name while the code read the original names
(model_args/data_args/training_args, ds, config, ...), both `collate_fn`
and `main` were defined under the same name `a__`, and dataclass fields had
lost their names — all recovered from the surviving attribute references.
"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Build the `data_files` dict consumed by `load_dataset` from the
        # optional train/validation folders.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generates a random boolean mask over image patches for SimMIM pretraining.

    The mask is produced at `mask_patch_size` granularity, then upsampled so
    it aligns with the model's `model_patch_size` patches; 1 means "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick `mask_count` random patches to mask, then expand each masked
        # cell to `scale` x `scale` model patches.
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    """Stack per-example pixel values and boolean masks into model inputs."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images: transform pixels and generate a mask per image."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
74
0
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # FIX: originally `range(__lowerCAmelCase, i + 1)` -- `__lowerCAmelCase` is undefined at
        # module level and raised NameError on import. With one job per shard each job owns
        # exactly one shard: [0,1), [1,2), ...
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards splits `num_shards` shard indices over at most `max_num_jobs` jobs.

    FIX: the original `def SCREAMING_SNAKE_CASE(__lowerCAmelCase, __lowerCAmelCase)` repeated
    one parameter name (a SyntaxError), and pytest matches parametrize argnames to parameter
    names, so the parameters must be `kwargs` and `expected`. All three functions in this
    module also shared one name (shadowing each other) and lacked the `test_` prefix pytest
    collects by; they are renamed accordingly.
    """
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs shards list-valued gen_kwargs across jobs; scalar values are replicated."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two list-valued arguments of different lengths are ambiguous and must raise.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """_number_of_shards_in_gen_kwargs infers the shard count from list-valued kwargs."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
33
"""Data2Vec Vision model configuration.

Reconstructed from an obfuscated, newline-stripped dump: the original text did
not parse (every ``__init__`` parameter was named ``_A``), both classes shared
a single name, and attribute assignments had lost their ``self.`` target.
Identifiers below are recovered from the surviving assignments and from the
imported-but-unused base classes; inferred names are flagged in comments.
"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)  # was `lowercase_`, clobbered by the dict below

# FIX: this mapping reused the name `lowercase_`, silently replacing the logger.
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration for `data2vec-vision` encoders and their segmentation heads.

    Defaults correspond to the base architecture; all arguments are optional.
    """

    # FIX: the base class was the undefined name `lowerCAmelCase__`;
    # `PretrainedConfig` is imported (and otherwise unused) and supplies the
    # **kwargs handling that `super().__init__(**kwargs)` relies on.
    model_type = "data2vec-vision"  # was `lowerCAmelCase_`; PretrainedConfig reads `model_type`

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        # FIX: all 27 parameters were named `_A` (duplicate parameter names are a
        # SyntaxError); names are recovered from the attribute assignments below,
        # which in turn had lost their `self.` prefix and stored nothing.
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export description for data2vec-vision.

    FIX: this class reused the obfuscated name of the configuration class
    above, clobbering it at module level; it now has its own name and derives
    from the imported (otherwise unused) `OnnxConfig`.
    """

    # NOTE(review): attribute name inferred from the OnnxConfig API -- confirm upstream.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel input with fully dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # FIX: both properties shared one obfuscated name, so this definition
        # silently shadowed the input mapping above; restored to the names the
        # export machinery reads.
        return 1e-4
74
0
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE_ = random.Random() def __snake_case ( _lowercase ,_lowercase=1.0 ,_lowercase=None ,_lowercase=None ): """simple docstring""" if rng is None: UpperCamelCase = global_rng UpperCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case_ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase_ , lowerCamelCase_=7 , lowerCamelCase_=4_0_0 , lowerCamelCase_=2_0_0_0 , lowerCamelCase_=2_0_4_8 , lowerCamelCase_=1_2_8 , lowerCamelCase_=1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=3_0 , lowerCamelCase_=4_4_1_0_0 , ) -> List[str]: UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = min_seq_length UpperCamelCase = max_seq_length UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase = spectrogram_length UpperCamelCase = feature_size UpperCamelCase = num_audio_channels UpperCamelCase = hop_length UpperCamelCase = chunk_length UpperCamelCase = sampling_rate def UpperCAmelCase__ ( self) -> List[str]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def UpperCAmelCase__ ( self , lowerCamelCase_=False , lowerCamelCase_=False) -> Tuple: def 
_flatten(lowerCamelCase_): return list(itertools.chain(*lowerCamelCase_)) if equal_length: UpperCamelCase = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size UpperCamelCase = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff) ] if numpify: UpperCamelCase = [np.asarray(lowerCamelCase_) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case_ ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" A_ = TvltFeatureExtractor def UpperCAmelCase__ ( self) -> Optional[int]: UpperCamelCase = TvltFeatureExtractionTester(self) def UpperCAmelCase__ ( self) -> Any: UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(lowerCamelCase_ , '''spectrogram_length''')) self.assertTrue(hasattr(lowerCamelCase_ , '''feature_size''')) self.assertTrue(hasattr(lowerCamelCase_ , '''num_audio_channels''')) self.assertTrue(hasattr(lowerCamelCase_ , '''hop_length''')) self.assertTrue(hasattr(lowerCamelCase_ , '''chunk_length''')) self.assertTrue(hasattr(lowerCamelCase_ , '''sampling_rate''')) def UpperCAmelCase__ ( self) -> List[Any]: UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = feat_extract_first.save_pretrained(lowerCamelCase_)[0] check_json_file_has_correct_format(lowerCamelCase_) UpperCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase_) UpperCamelCase = feat_extract_first.to_dict() UpperCamelCase = feat_extract_second.to_dict() UpperCamelCase = dict_first.pop('''mel_filters''') UpperCamelCase = dict_second.pop('''mel_filters''') self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_)) self.assertEqual(lowerCamelCase_ , lowerCamelCase_) def UpperCAmelCase__ ( self) -> Optional[int]: UpperCamelCase = 
self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , '''feat_extract.json''') feat_extract_first.to_json_file(lowerCamelCase_) UpperCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase_) UpperCamelCase = feat_extract_first.to_dict() UpperCamelCase = feat_extract_second.to_dict() UpperCamelCase = dict_first.pop('''mel_filters''') UpperCamelCase = dict_second.pop('''mel_filters''') self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_)) self.assertEqual(lowerCamelCase_ , lowerCamelCase_) def UpperCAmelCase__ ( self) -> Optional[int]: # Initialize feature_extractor UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) # create three inputs of length 800, 1000, and 1200 UpperCamelCase = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] UpperCamelCase = [np.asarray(lowerCamelCase_) for speech_input in speech_inputs] # Test not batched input UpperCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values self.assertTrue(encoded_audios.ndim == 4) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels) # Test batched UpperCamelCase = feature_extractor(lowerCamelCase_ , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values self.assertTrue(encoded_audios.ndim == 4) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels) # Test audio masking UpperCamelCase = feature_extractor( lowerCamelCase_ , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=lowerCamelCase_).audio_values 
self.assertTrue(encoded_audios.ndim == 4) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels) # Test 2-D numpy arrays are batched. UpperCamelCase = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)] UpperCamelCase = np.asarray(lowerCamelCase_) UpperCamelCase = feature_extractor(lowerCamelCase_ , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values self.assertTrue(encoded_audios.ndim == 4) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels) def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[Any]: UpperCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''') # automatic decoding with librispeech UpperCamelCase = ds.sort('''id''').select(range(lowerCamelCase_))[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self) -> List[Any]: UpperCamelCase = self._load_datasamples(1) UpperCamelCase = TvltFeatureExtractor() UpperCamelCase = feature_extractor(lowerCamelCase_ , return_tensors='''pt''').audio_values self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8)) UpperCamelCase = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]]) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4))
34
"""Tests for the PyTorch DistilBert model (reconstructed from an obfuscated, newline-stripped dump)."""
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester:
    """Builds tiny DistilBert configs/inputs and checks each head's output shapes.

    FIX: this class was named `__UpperCamelCase` -- a name also reused by the
    two classes below, each clobbering the previous -- with the undefined base
    `lowerCAmelCase__`. The test class instantiates `DistilBertModelTester(self)`,
    which fixes the intended name. The obfuscated `_A` placeholders in `model.to(_A)`
    are resolved to the imported `torch_device`, the only device name in scope.
    Every method's parameters were named `_A` (duplicate parameter names are a
    SyntaxError); names are recovered from the attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline test suite for DistilBert.

    FIX: mixin bases were the undefined name `lowerCAmelCase__`; the imported
    (otherwise unused) `ModelTesterMixin`/`PipelineTesterMixin` are restored.
    All methods previously shared one name (`UpperCAmelCase__`), so only the
    last survived and none were collected; they are renamed `test_*`.
    """

    # Attribute names restored to the ones the mixins read (the obfuscation had
    # collapsed them all onto `lowerCAmelCase_`).
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscation collapsed four boolean test flags (all True)
    # onto the single name `lowerCAmelCase_`; their intended identifiers (e.g.
    # fx_compatible, test_pruning, ...) cannot be recovered from this source --
    # confirm against the upstream file.
    lowerCAmelCase_ = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            # NOTE(review): the obfuscated source only shows a bare `... = True`
            # here; setting `config.torchscript` is the standard pattern before
            # tracing -- confirm against upstream.
            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the pretrained distilbert-base-uncased weights."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
74
0
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class lowercase ( unittest.TestCase ): def lowercase__ ( self : int ): SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above SCREAMING_SNAKE_CASE__ : Optional[int] = tf_top_k_top_p_filtering(_lowercase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = output[output != -float('''inf''' )] SCREAMING_SNAKE_CASE__ : Dict = tf.cast( tf.where(tf.not_equal(_lowercase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(_lowercase , _lowercase , rtol=1E-12 ) tf.debugging.assert_equal(_lowercase , _lowercase ) @require_tf class lowercase ( unittest.TestCase , _UpperCAmelCase ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): lowerCamelCase : Optional[int] = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowercase__ ( self : Any ): # TF-only test: tf.saved_model export SCREAMING_SNAKE_CASE__ : Any = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Dict = 2 class lowercase ( tf.Module ): def __init__( self 
: List[str] , _lowercase : List[Any] ): super(_lowercase , self ).__init__() SCREAMING_SNAKE_CASE__ : Tuple = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=_lowercase , ) def lowercase__ ( self : List[str] , _lowercase : Tuple , _lowercase : List[str] ): SCREAMING_SNAKE_CASE__ : str = self.model.generate( input_ids=_lowercase , attention_mask=_lowercase , max_new_tokens=_lowercase , return_dict_in_generate=_lowercase , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE__ : Any = [[2, 0], [1_02, 1_03]] SCREAMING_SNAKE_CASE__ : Any = [[1, 0], [1, 1]] SCREAMING_SNAKE_CASE__ : List[Any] = DummyModel(model=_lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_lowercase , _lowercase , signatures={'''serving_default''': dummy_model.serving} ) SCREAMING_SNAKE_CASE__ : str = tf.saved_model.load(_lowercase ).signatures['''serving_default'''] for batch_size in range(1 , len(_lowercase ) + 1 ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } SCREAMING_SNAKE_CASE__ : Optional[int] = serving_func(**_lowercase )['''sequences'''] SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**_lowercase , max_new_tokens=_lowercase ) tf.debugging.assert_equal(_lowercase , _lowercase ) @slow def lowercase__ ( self : Optional[Any] ): # TF-only test: tf.saved_model export SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : Tuple = 2 class lowercase ( tf.Module ): def __init__( self : Union[str, Any] , _lowercase : int ): super(_lowercase , self ).__init__() SCREAMING_SNAKE_CASE__ : List[str] = model @tf.function( input_signature=( 
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=_lowercase , ) def lowercase__ ( self : Dict , _lowercase : Tuple , _lowercase : Union[str, Any] ): SCREAMING_SNAKE_CASE__ : Any = self.model.generate( input_ids=_lowercase , attention_mask=_lowercase , max_new_tokens=_lowercase , return_dict_in_generate=_lowercase , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[2], [1_02, 1_03]] SCREAMING_SNAKE_CASE__ : int = [[1], [1, 1]] SCREAMING_SNAKE_CASE__ : Any = DummyModel(model=_lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_lowercase , _lowercase , signatures={'''serving_default''': dummy_model.serving} ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.saved_model.load(_lowercase ).signatures['''serving_default'''] for input_row in range(len(_lowercase ) ): SCREAMING_SNAKE_CASE__ : str = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } SCREAMING_SNAKE_CASE__ : List[str] = serving_func(**_lowercase )['''sequences'''] SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**_lowercase , max_new_tokens=_lowercase ) tf.debugging.assert_equal(_lowercase , _lowercase ) @slow @require_tensorflow_text def lowercase__ ( self : Optional[int] ): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_lowercase ) class lowercase ( tf.keras.layers.Layer ): def __init__( self : Any ): super().__init__() SCREAMING_SNAKE_CASE__ : int = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(_lowercase , '''spiece.model''' ) , '''rb''' ).read() ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 
TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def lowercase__ ( self : List[str] , _lowercase : Optional[Any] , *_lowercase : Tuple , **_lowercase : str ): SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.tokenize(_lowercase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = text.pad_model_inputs( _lowercase , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase ) return self.tokenizer.detokenize(_lowercase ) SCREAMING_SNAKE_CASE__ : Any = CompleteSentenceTransformer() SCREAMING_SNAKE_CASE__ : str = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) SCREAMING_SNAKE_CASE__ : Dict = complete_model(_lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.Model(_lowercase , _lowercase ) keras_model.save(_lowercase ) def lowercase__ ( self : str ): # Has PT equivalent: this test relies on random sampling SCREAMING_SNAKE_CASE__ : Union[str, Any] = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 10, '''temperature''': 0.7, } SCREAMING_SNAKE_CASE__ : Any = 14 SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''Hello, my dog is cute and''' SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_lowercase , return_tensors='''tf''' ) SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) SCREAMING_SNAKE_CASE__ : List[str] = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = model.generate(**_lowercase , eos_token_id=_lowercase , **_lowercase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) SCREAMING_SNAKE_CASE__ : List[str] = [6_38, 1_98] with 
tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(**_lowercase , eos_token_id=_lowercase , **_lowercase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowercase__ ( self : Optional[int] ): # Has PT equivalent: ample use of framework-specific code SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) SCREAMING_SNAKE_CASE__ : List[Any] = '''Hugging Face is a technology company based in New York and Paris.''' SCREAMING_SNAKE_CASE__ : Optional[int] = bart_tokenizer(_lowercase , return_tensors='''tf''' ).input_ids SCREAMING_SNAKE_CASE__ : str = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) SCREAMING_SNAKE_CASE__ : str = bart_model.generate(_lowercase ).numpy() class lowercase ( _UpperCAmelCase ): def lowercase__ ( self : Optional[Any] , _lowercase : Optional[int] , _lowercase : List[str]=None , **_lowercase : Union[str, Any] ): return super().call(_lowercase , **_lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) SCREAMING_SNAKE_CASE__ : List[Any] = bart_model.generate(_lowercase , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(_lowercase , _lowercase ) ) class lowercase ( bart_model.model.encoder.__class__ ): def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] , **_lowercase : List[str] ): return super().call(_lowercase , **_lowercase ) SCREAMING_SNAKE_CASE__ : Dict = FakeEncoder(bart_model.config , bart_model.model.shared ) SCREAMING_SNAKE_CASE__ : List[Any] = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) SCREAMING_SNAKE_CASE__ : Dict = bart_model.generate(_lowercase ).numpy() with self.assertRaises(_lowercase ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" 
bart_model.generate(_lowercase , foo='''bar''' )
35
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Return the module logger, creating it lazily on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when a lock could not be acquired within the timeout.

    Fix vs. previous revision: assignments bound a dummy name and the class
    inherited from the undefined `lowerCAmelCase__`; names are restored so the
    module actually imports and runs.
    """

    def __init__(self, lock_file):
        # Path of the lock file that could not be acquired.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Proxy returned by `BaseFileLock.acquire` so that
    `with lock.acquire(...):` releases the lock on exit."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Platform-independent lock behaviour: nesting counter, timeout/polling,
    context-manager protocol. Subclasses provide `_acquire`/`_release`."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased
        # and the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-specific acquisition; implemented by subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-specific release; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the lock, polling every *poll_intervall* seconds.

        Raises `Timeout` when *timeout* (>= 0) seconds elapse without
        success; a negative timeout waits forever.
        """
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Release one nesting level; actually unlock when the counter hits
        zero or *force* is true."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """Shorten *path*'s basename to *max_length* chars (when positive) by
        replacing its middle with a hash; the directory part is kept intact."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """msvcrt-based lock for Windows."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # \\?\ prefix lifts the MAX_PATH limit for long absolute paths.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """fcntl.flock-based lock for POSIX systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # The real limit is the filesystem's maximum filename length.
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Lock via exclusive file creation; portable but not crash-safe."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
74
0
def lowercase ( __A : str ) -> str: '''simple docstring''' snake_case : Optional[int] = 0 # if input_string is "aba" than new_input_string become "a|b|a" snake_case : Optional[int] = """""" snake_case : Tuple = """""" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(__A ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring snake_case , snake_case : Any = 0, 0 # length[i] shows the length of palindromic substring with center i snake_case : Union[str, Any] = [1 for i in range(len(__A ) )] # for each character in new_string find corresponding palindromic string snake_case : Tuple = 0 for j in range(len(__A ) ): snake_case : List[str] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(__A ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 snake_case : List[str] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: snake_case : str = j - k + 1 # noqa: E741 snake_case : int = j + k - 1 # update max_length and start position if max_length < length[j]: snake_case : str = length[j] snake_case : Optional[Any] = j # create that string snake_case : int = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
36
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


lowercase_ = logging.get_logger(__name__)


class __UpperCamelCase(FeatureExtractionMixin):
    """
    Feature extractor that turns raw HTML into the DOM-derived inputs a
    MarkupLM-style model expects: the text of every leaf node plus, for each
    node, its xpath expressed as parallel tag/subscript sequences.

    Fixes vs. the previous revision: assignments bound a dummy name while
    reads used the real names, all methods shared one obfuscated name so the
    internal `self.xpath_soup` / `self.get_three_from_single` /
    `self.construct_xpath` calls could not resolve (and duplicate `_A`
    parameters were a SyntaxError), and the base class `lowerCAmelCase__` was
    undefined.
    """

    def __init__(self, **kwargs):
        # bs4 is an optional backend; fail with a clear error when missing.
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk from `element` up to the document root and return its xpath as
        two parallel lists: tag names and 1-based sibling subscripts (0 when
        the tag is an only same-named child)."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            # recursive=False: position among direct same-named siblings only.
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        # Built leaf-to-root; xpaths read root-to-leaf.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string and return (node texts, xpath tag sequences,
        xpath subscript sequences), one entry per non-empty text node."""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Join parallel tag/subscript lists into an xpath string such as
        "/html/body/div[2]" (a 0 subscript emits no bracket)."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """
        Extract nodes and xpaths from one HTML string or a batch of them.

        Args:
            html_strings: a `str` or a list/tuple of `str`.

        Returns:
            A BatchFeature with "nodes" (list of node-text lists) and
            "xpaths" (list of xpath-string lists, aligned with "nodes").

        Raises:
            ValueError: when `html_strings` is neither a string nor a
                list/tuple of strings.
        """
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict; no tensor conversion is performed here
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
74
0
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class A__(unittest.TestCase):
    """
    Multi-GPU integration tests: each test launches a helper script under
    `torchrun` across the visible CUDA devices.

    Fixes vs. the previous revision: every method was named `_UpperCamelCase`
    (later defs shadowed earlier ones and unittest discovered none of them),
    and assignments bound a dummy `a__` name while later statements read
    `mod_file` / `self.test_file_path` etc., which were never defined.
    """

    def setUp(self):
        # Resolve the bundled test scripts relative to accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        # omp_num_threads=1 avoids thread oversubscription in the child procs.
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-launches THIS file under torchrun; the __main__ block below is
        # what actually exercises pad_across_processes on each rank.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    # Executed on every rank when test_pad_across_processes relaunches this
    # file under torchrun.  Each rank holds a tensor of a different length
    # (process_index + 2), so padding must equalise the shapes.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
37
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
lowercase_ = logging.get_logger()


def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ):
    """Convert one timm LeViT checkpoint into the HF format and sanity-check the logits.

    NOTE(review): all five parameters share the obfuscated name `snake_case`, so the
    body's reads of `hidden_sizes`, `name`, `from_model`, `our_model`, `weights`,
    `og_keys`, `push_to_hub`, `save_directory`, `checkpoint_name`, `image_processor`
    resolve to nothing — this function is mangled; compare with upstream
    convert_levit_to_pytorch.py before trusting any of it.
    """
    print(F'''Converting {name}...''' )
    # no_grad: we only copy weights and run a forward check, never train.
    with torch.no_grad():
        # Pick the matching timm model; 128-wide LeViT has an "S" (small) variant.
        if hidden_sizes == 128:
            if name[-1] == "S":
                __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case )
            else:
                __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case )
        if hidden_sizes == 192:
            __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case )
        if hidden_sizes == 256:
            __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case )
        if hidden_sizes == 384:
            __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case )
        from_model.eval()
        __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval()
        __SCREAMING_SNAKE_CASE : int = OrderedDict()
        __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict()
        __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() )
        __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() )
        print(len(snake_case ) , len(snake_case ) )
        # Copy weights positionally: i-th timm key -> i-th HF key.
        for i in range(len(snake_case ) ):
            __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]]
        our_model.load_state_dict(snake_case )
        # Forward both models on a random batch and require matching logits.
        __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) )
        __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case )
        __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits
        assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one."
    __SCREAMING_SNAKE_CASE : Union[str, Any] = name
    print(snake_case )
    if push_to_hub:
        # Save model + image processor under the checkpoint name for upload.
        our_model.save_pretrained(save_directory / checkpoint_name )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'''Pushed {checkpoint_name}''' )


def a__ ( snake_case , snake_case = None , snake_case = True ):
    """Build per-variant LeViT configs and convert one (or every) checkpoint.

    NOTE(review): same obfuscation damage as above — `num_labels`, `idalabel`,
    `ImageNetPreTrainedConfig`, `model_name`, `names_to_hidden_sizes`,
    `names_to_config`, `config`, `expected_shape` are read but never assigned.
    """
    __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json'''
    __SCREAMING_SNAKE_CASE : int = 1_000
    __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels)
    __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
    __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
    # Download the ImageNet-1k id->label mapping and build both directions.
    __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()}
    __SCREAMING_SNAKE_CASE : str = idalabel
    __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}
    # Pre-bind the label metadata so each variant below only supplies architecture args.
    __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = {
        '''levit-128S''': 128,
        '''levit-128''': 128,
        '''levit-192''': 192,
        '''levit-256''': 256,
        '''levit-384''': 384,
    }
    __SCREAMING_SNAKE_CASE : Optional[int] = {
        '''levit-128S''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-128''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-192''': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-256''': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-384''': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        # Convert a single named variant.
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case )
    else:
        # No name given: convert every known variant.
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case )
    return config, expected_shape


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""levit-dump-folder/""",
        type=Path,
        required=False,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # NOTE(review): `parser`, `args`, `pytorch_dump_folder_path` are read but the
    # assignments above target `lowercase_`; `convert_weights_and_push` is also not
    # the name either function above was given — obfuscation damage.
    lowercase_ = parser.parse_args()
    lowercase_ = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
74
0
"""Processor that bundles a Wav2Vec2 feature extractor with a CTC tokenizer."""
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer


class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """Wav2Vec2 processor: routes audio to the feature extractor and text to the tokenizer.

    NOTE(review): the base-class name `__SCREAMING_SNAKE_CASE` is undefined (upstream
    this derives from ProcessorMixin), and several methods below declare duplicate
    `__SCREAMING_SNAKE_CASE` parameters, which is a SyntaxError — obfuscation damage.
    """

    # Class-level defaults consumed by the processor machinery
    # (feature-extractor class name, tokenizer class name).
    lowerCamelCase__ = '''Wav2Vec2FeatureExtractor'''
    lowerCamelCase__ = '''AutoTokenizer'''

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        # `current_processor` starts as the feature extractor; the deprecated
        # as_target_processor() context switches it to the tokenizer.
        snake_case__ : Optional[int] = self.feature_extractor
        snake_case__ : Optional[Any] = False

    @classmethod
    def __UpperCamelCase ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        # Normal path: delegate to the mixin's from_pretrained.
        try:
            return super().from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        except OSError:
            # Legacy configs without a `tokenizer_class`: load the two parts directly
            # and warn about the deprecation.
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ , __SCREAMING_SNAKE_CASE , )
            snake_case__ : List[str] = WavaVecaFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
            snake_case__ : Optional[Any] = WavaVecaCTCTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
            return cls(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )

    def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        # `raw_speech` is the deprecated alias of `audio`.
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            snake_case__ : Optional[Any] = kwargs.pop("""raw_speech""" )
        else:
            snake_case__ : Union[str, Any] = kwargs.pop("""audio""" , __SCREAMING_SNAKE_CASE )
        snake_case__ : Union[str, Any] = kwargs.pop("""sampling_rate""" , __SCREAMING_SNAKE_CASE )
        snake_case__ : Dict = kwargs.pop("""text""" , __SCREAMING_SNAKE_CASE )
        # First positional argument, if any, is treated as the audio input.
        if len(__SCREAMING_SNAKE_CASE ) > 0:
            snake_case__ : Optional[Any] = args[0]
            snake_case__ : Dict = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            snake_case__ : Optional[int] = self.feature_extractor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        if text is not None:
            snake_case__ : Any = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        # Return whichever side was processed; if both, attach token ids as labels.
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            snake_case__ : List[Any] = encodings["""input_ids"""]
            return inputs

    def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        snake_case__ : Any = kwargs.pop("""input_features""" , __SCREAMING_SNAKE_CASE )
        snake_case__ : str = kwargs.pop("""labels""" , __SCREAMING_SNAKE_CASE )
        if len(__SCREAMING_SNAKE_CASE ) > 0:
            snake_case__ : Dict = args[0]
            snake_case__ : int = args[1:]
        # Pad audio features and/or label ids with the matching component.
        if input_features is not None:
            snake_case__ : Any = self.feature_extractor.pad(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        if labels is not None:
            snake_case__ : List[Any] = self.tokenizer.pad(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            snake_case__ : Tuple = labels["""input_ids"""]
            return input_features

    def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        # Thin delegate to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        # Thin delegate to the tokenizer's decode.
        return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    @contextmanager
    def __UpperCamelCase ( self ):
        # Deprecated: temporarily make the tokenizer the "current processor"
        # so bare __call__/pad operate on labels.
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        snake_case__ : List[str] = True
        snake_case__ : Any = self.tokenizer
        yield
        # Restore the feature extractor after the with-block exits.
        snake_case__ : str = self.feature_extractor
        snake_case__ : str = False
38
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Lazy-import structure for the Falcon model package: submodules are imported
# only when one of their attributes is first accessed.
# NOTE(review): assignments target the throwaway name `lowercase_` while
# `_import_structure` is read at the bottom — obfuscation damage; upstream this
# dict/list build up `_import_structure`.
lowercase_ = {
    """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch not installed: expose only the configuration symbols.
    pass
else:
    lowercase_ = [
        """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FalconForCausalLM""",
        """FalconModel""",
        """FalconPreTrainedModel""",
        """FalconForSequenceClassification""",
        """FalconForTokenClassification""",
        """FalconForQuestionAnswering""",
    ]

if TYPE_CHECKING:
    # Static type checkers get the real imports.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module object with a lazy proxy.
    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
0
from abc import ABC, abstractmethod
from typing import List, Optional


# NOTE(review): throughout this module every class is named `snake_case_` and every
# method `snake_case__`, and assignments bind to the throwaway `snake_case_` while
# later code reads the real attribute names (self.token_ids, self.trie, ...) and the
# real class names (PhrasalConstraint, DisjunctiveTrie, DisjunctiveConstraint,
# ConstraintListState) — obfuscation damage. Also the base `__A` is undefined.
# Compare with upstream transformers/generation/beam_constraints.py.
class snake_case_ ( __A ):
    """Abstract base for a decoding-time constraint (upstream: Constraint)."""

    def __init__( self : int ) ->Optional[Any]:
        # test for the above condition
        self.test()

    def snake_case__( self : int ) ->str:
        """Self-check: repeatedly feed advance() into update() until completion."""
        snake_case_ = 0
        snake_case_ = False
        while not completed:
            if counter == 1:
                self.reset()
            snake_case_ = self.advance()
            if not self.does_advance(_UpperCamelCase ):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            snake_case_, snake_case_, snake_case_ = self.update(_UpperCamelCase )
            counter += 1
            # Safety valve against constraints that never complete.
            if counter > 1_0_0_0_0:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )

    @abstractmethod
    def snake_case__( self : List[Any] ) ->Union[str, Any]:
        # advance(): next token(s) that make progress.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def snake_case__( self : int , _UpperCamelCase : int ) ->List[str]:
        # does_advance(token_id): would this token make progress?
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def snake_case__( self : Union[str, Any] , _UpperCamelCase : int ) ->int:
        # update(token_id) -> (stepped, completed, reset).
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def snake_case__( self : int ) ->str:
        # reset(): forget all progress.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def snake_case__( self : int ) ->str:
        # remaining(): steps left until fulfilled.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def snake_case__( self : List[str] , _UpperCamelCase : List[Any]=False ) ->List[Any]:
        # copy(stateful): duplicate, optionally carrying current progress.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )


class snake_case_ ( __A ):
    """Force an exact token sequence to appear (upstream: PhrasalConstraint)."""

    def __init__( self : Optional[Any] , _UpperCamelCase : List[int] ) ->Dict:
        super(_UpperCamelCase , self ).__init__()
        if not isinstance(_UpperCamelCase , _UpperCamelCase ) or len(_UpperCamelCase ) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(_UpperCamelCase , _UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        snake_case_ = token_ids
        snake_case_ = len(self.token_ids )
        snake_case_ = -1  # the index of the currently fulfilled step
        snake_case_ = False

    def snake_case__( self : Dict ) ->Dict:
        # advance(): the next token of the phrase, or None once fulfilled.
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def snake_case__( self : Union[str, Any] , _UpperCamelCase : int ) ->Optional[Any]:
        # does_advance(token_id).
        if not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def snake_case__( self : Union[str, Any] , _UpperCamelCase : int ) ->int:
        # update(token_id) -> (stepped, completed, reset).
        if not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
        snake_case_ = False
        snake_case_ = False
        snake_case_ = False
        if self.does_advance(_UpperCamelCase ):
            self.fulfilled_idx += 1
            snake_case_ = True
            if self.fulfilled_idx == (self.seqlen - 1):
                snake_case_ = True
            snake_case_ = completed
        else:
            # failed to make progress.
            snake_case_ = True
            self.reset()
        return stepped, completed, reset

    def snake_case__( self : Any ) ->Union[str, Any]:
        # reset(): back to no progress.
        snake_case_ = False
        snake_case_ = 0

    def snake_case__( self : Union[str, Any] ) ->int:
        # remaining(): tokens of the phrase not yet matched.
        return self.seqlen - (self.fulfilled_idx + 1)

    def snake_case__( self : str , _UpperCamelCase : Union[str, Any]=False ) ->int:
        # copy(stateful): fresh constraint, optionally with current progress carried over.
        snake_case_ = PhrasalConstraint(self.token_ids )
        if stateful:
            snake_case_ = self.seqlen
            snake_case_ = self.fulfilled_idx
            snake_case_ = self.completed
        return new_constraint


class snake_case_ :
    """Prefix trie over alternative token sequences (upstream: DisjunctiveTrie)."""

    def __init__( self : List[str] , _UpperCamelCase : List[List[int]] , _UpperCamelCase : List[Any]=True ) ->str:
        # max_height = length of the longest alternative.
        snake_case_ = max([len(_UpperCamelCase ) for one in nested_token_ids] )
        snake_case_ = {}
        # Insert every alternative as a root-to-leaf path of nested dicts.
        for token_ids in nested_token_ids:
            snake_case_ = root
            for tidx, token_id in enumerate(_UpperCamelCase ):
                if token_id not in level:
                    snake_case_ = {}
                snake_case_ = level[token_id]
        # Reject inputs where one alternative is a strict prefix/subset of another
        # (would make leaf detection ambiguous).
        if no_subsets and self.has_subsets(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                f''' {nested_token_ids}.''' )
        snake_case_ = root

    def snake_case__( self : Any , _UpperCamelCase : List[Any] ) ->Optional[Any]:
        # next_tokens(current_seq): children reachable after walking current_seq.
        snake_case_ = self.trie
        for current_token in current_seq:
            snake_case_ = start[current_token]
        snake_case_ = list(start.keys() )
        return next_tokens

    def snake_case__( self : Optional[int] , _UpperCamelCase : int ) ->Optional[int]:
        # reached_leaf(current_seq): True when no continuation exists.
        snake_case_ = self.next_tokens(_UpperCamelCase )
        return len(_UpperCamelCase ) == 0

    def snake_case__( self : List[Any] , _UpperCamelCase : List[Any] ) ->Dict:
        # count_leaves(root): number of complete alternatives under this node.
        snake_case_ = list(root.values() )
        if len(_UpperCamelCase ) == 0:
            return 1
        else:
            return sum([self.count_leaves(_UpperCamelCase ) for nn in next_nodes] )

    def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] ) ->int:
        # has_subsets: leaf count differing from the number of alternatives means
        # some alternative ended inside another's path.
        snake_case_ = self.count_leaves(_UpperCamelCase )
        return len(_UpperCamelCase ) != leaf_count


class snake_case_ ( __A ):
    """Fulfil any one of several token sequences (upstream: DisjunctiveConstraint)."""

    def __init__( self : Optional[int] , _UpperCamelCase : List[List[int]] ) ->Any:
        super(_UpperCamelCase , self ).__init__()
        if not isinstance(_UpperCamelCase , _UpperCamelCase ) or len(_UpperCamelCase ) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(_UpperCamelCase , _UpperCamelCase ) for token_ids in nested_token_ids ):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(_UpperCamelCase , _UpperCamelCase ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        snake_case_ = DisjunctiveTrie(_UpperCamelCase )
        snake_case_ = nested_token_ids
        snake_case_ = self.trie.max_height
        snake_case_ = []
        snake_case_ = False

    def snake_case__( self : Optional[int] ) ->Optional[int]:
        # advance(): tokens continuing any still-viable alternative, or None.
        snake_case_ = self.trie.next_tokens(self.current_seq )
        if len(_UpperCamelCase ) == 0:
            return None
        else:
            return token_list

    def snake_case__( self : Dict , _UpperCamelCase : int ) ->Dict:
        # does_advance(token_id).
        if not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
        snake_case_ = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens

    def snake_case__( self : Tuple , _UpperCamelCase : int ) ->Optional[Any]:
        # update(token_id) -> (stepped, completed, reset).
        if not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCamelCase )}''' )
        snake_case_ = False
        snake_case_ = False
        snake_case_ = False
        if self.does_advance(_UpperCamelCase ):
            self.current_seq.append(_UpperCamelCase )
            snake_case_ = True
        else:
            snake_case_ = True
            self.reset()
        snake_case_ = self.trie.reached_leaf(self.current_seq )
        snake_case_ = completed
        return stepped, completed, reset

    def snake_case__( self : List[Any] ) ->str:
        # reset(): drop the in-progress sequence.
        snake_case_ = False
        snake_case_ = []

    def snake_case__( self : Tuple ) ->Dict:
        # remaining().
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def snake_case__( self : Union[str, Any] , _UpperCamelCase : List[Any]=False ) ->Optional[int]:
        # copy(stateful).
        snake_case_ = DisjunctiveConstraint(self.token_ids )
        if stateful:
            snake_case_ = self.seqlen
            snake_case_ = self.current_seq
            snake_case_ = self.completed
        return new_constraint


class snake_case_ :
    """Tracks progress over a whole list of constraints during beam search
    (upstream: ConstraintListState)."""

    def __init__( self : Tuple , _UpperCamelCase : List[Constraint] ) ->str:
        snake_case_ = constraints
        # max # of steps required to fulfill a given constraint
        snake_case_ = max([c.seqlen for c in constraints] )
        snake_case_ = len(_UpperCamelCase )
        snake_case_ = False
        self.init_state()

    def snake_case__( self : Tuple ) ->Dict:
        # init_state(): nothing complete, nothing in progress, all pending.
        snake_case_ = []
        snake_case_ = None
        snake_case_ = [constraint.copy(stateful=_UpperCamelCase ) for constraint in self.constraints]

    def snake_case__( self : Tuple ) ->int:
        # get_bank(): scalar score of overall progress for beam ranking.
        snake_case_ = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add

    def snake_case__( self : Optional[Any] ) ->Optional[Any]:
        # advance(): all tokens that would make progress on some constraint.
        snake_case_ = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:
                # "pending" == "unfulfilled yet"
                snake_case_ = constraint.advance()
                if isinstance(_UpperCamelCase , _UpperCamelCase ):
                    token_list.append(_UpperCamelCase )
                elif isinstance(_UpperCamelCase , _UpperCamelCase ):
                    token_list.extend(_UpperCamelCase )
        else:
            snake_case_ = self.inprogress_constraint.advance()
            if isinstance(_UpperCamelCase , _UpperCamelCase ):
                token_list.append(_UpperCamelCase )
            elif isinstance(_UpperCamelCase , _UpperCamelCase ):
                token_list.extend(_UpperCamelCase )
        if len(_UpperCamelCase ) == 0:
            return None
        else:
            return token_list

    def snake_case__( self : Dict , _UpperCamelCase : Optional[List[int]] ) ->List[Any]:
        # reset(token_ids): replay an already-generated prefix through the constraints.
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                snake_case_, snake_case_ = self.add(_UpperCamelCase )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def snake_case__( self : Optional[int] , _UpperCamelCase : int ) ->List[Any]:
        # add(token_id) -> (complete, stepped).
        if not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
        snake_case_, snake_case_ = False, False
        if self.completed:
            snake_case_ = True
            snake_case_ = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            snake_case_, snake_case_, snake_case_ = self.inprogress_constraint.update(_UpperCamelCase )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCamelCase ) )
                snake_case_ = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                snake_case_ = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    snake_case_ = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(_UpperCamelCase ):
                    snake_case_, snake_case_, snake_case_ = pending_constraint.update(_UpperCamelCase )
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(_UpperCamelCase )
                        snake_case_ = None
                    if not complete and stepped:
                        snake_case_ = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        snake_case_ = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            snake_case_ = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def snake_case__( self : int , _UpperCamelCase : List[str]=True ) ->Optional[Any]:
        # copy(stateful): duplicate the whole list-state.
        snake_case_ = ConstraintListState(self.constraints )  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            snake_case_ = [
                constraint.copy(stateful=_UpperCamelCase ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                snake_case_ = self.inprogress_constraint.copy(stateful=_UpperCamelCase )
            snake_case_ = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
39
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


lowercase_ = logging.get_logger(__name__)


def a__ ( snake_case , snake_case ):
    """Extract targeted warning bodies from one CI artifact (zip file or plain dir).

    NOTE(review): both parameters are the obfuscated `snake_case`; the body reads
    `fp`, `targets`, `from_gh`, `artifact_path`, `selected_warnings`, `buffer`,
    `warning` which are never bound — mangled by obfuscation; compare with upstream
    utils/extract_warnings.py.
    """
    __SCREAMING_SNAKE_CASE : Optional[int] = set()
    __SCREAMING_SNAKE_CASE : str = []

    def parse_line(snake_case ):
        # Accumulate the (possibly multi-line) body of one pytest warning, then
        # keep it only if it names one of the target warning categories.
        for line in fp:
            if isinstance(snake_case , snake_case ):
                __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' ''' ):
                # process a single warning and move it to `selected_warnings`.
                if len(snake_case ) > 0:
                    __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case )
                    # Only keep the warnings specified in `targets`
                    if any(F''': {x}: ''' in warning for x in targets ):
                        selected_warnings.add(snake_case )
                    buffer.clear()
                continue
            else:
                __SCREAMING_SNAKE_CASE : int = line.strip()
                buffer.append(snake_case )

    if from_gh:
        # Artifacts already unpacked by actions/download-artifact: walk the dir.
        for filename in os.listdir(snake_case ):
            __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case )
            if not os.path.isdir(snake_case ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(snake_case ) as fp:
                    parse_line(snake_case )
    else:
        # Locally downloaded artifacts are zip files.
        try:
            with zipfile.ZipFile(snake_case ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(snake_case ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(snake_case ) as fp:
                            parse_line(snake_case )
        except Exception:
            # Best-effort: skip unreadable artifacts rather than aborting the run.
            logger.warning(
                F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings


def a__ ( snake_case , snake_case ):
    """Collect targeted warnings from every artifact found in the output dir."""
    __SCREAMING_SNAKE_CASE : List[str] = set()
    __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) )
    return selected_warnings


if __name__ == "__main__":

    def a__ ( snake_case ):
        """argparse type: split a comma-separated option into a list."""
        return values.split(''',''' )

    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    parser.add_argument(
        """--output_dir""",
        type=str,
        required=True,
        help="""Where to store the downloaded artifacts and other result files.""",
    )
    parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    # optional parameters
    parser.add_argument(
        """--targets""",
        default="""DeprecationWarning,UserWarning,FutureWarning""",
        type=list_str,
        help="""Comma-separated list of target warning(s) which we want to extract.""",
    )
    parser.add_argument(
        """--from_gh""",
        action="""store_true""",
        help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
    )

    lowercase_ = parser.parse_args()
    lowercase_ = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)
        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("""=""" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)
    # extract warnings from artifacts
    lowercase_ = extract_warnings(args.output_dir, args.targets)
    lowercase_ = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
74
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


__UpperCAmelCase = logging.get_logger(__name__)

# Reference checkpoint -> config URL map for ConvNeXt V2.
__UpperCAmelCase = {
    '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}


class lowerCAmelCase_ ( a__ , a__ ):
    """Configuration for a ConvNeXt V2 model / backbone.

    NOTE(review): both base classes are the obfuscated name `a__` (upstream:
    BackboneConfigMixin, PretrainedConfig), and __init__ below declares every
    parameter as `SCREAMING_SNAKE_CASE_`, which is a duplicate-argument
    SyntaxError — obfuscation damage; compare with upstream
    configuration_convnextv2.py.
    """

    # model_type identifier used by the auto classes.
    UpperCAmelCase__ : int = "convnextv2"

    def __init__( self, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-12, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=224, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Tuple:
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : str = num_channels
        UpperCamelCase : str = patch_size
        UpperCamelCase : Tuple = num_stages
        # Per-stage widths/depths default to the "tiny" variant when not given.
        UpperCamelCase : Optional[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        UpperCamelCase : Optional[Any] = [3, 3, 9, 3] if depths is None else depths
        UpperCamelCase : Optional[Any] = hidden_act
        UpperCamelCase : Any = initializer_range
        UpperCamelCase : Tuple = layer_norm_eps
        UpperCamelCase : Optional[Any] = drop_path_rate
        UpperCamelCase : int = image_size
        # Backbone stage naming: "stem" then one entry per depth.
        UpperCamelCase : Optional[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1, len(self.depths ) + 1 )]
        # Validate/align the requested backbone outputs against the stage names.
        UpperCamelCase , UpperCamelCase : int = get_aligned_output_features_output_indices(
            out_features=SCREAMING_SNAKE_CASE_, out_indices=SCREAMING_SNAKE_CASE_, stage_names=self.stage_names )
40
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel.

    Attributes:
        sample: Hidden states of shape (batch_frames, channels, height, width).
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Transformer that attends over the temporal (frame) axis of video latents."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        # 1. Input normalization + projection into the transformer width.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 2. Transformer blocks.
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )

        # 3. Projection back to the input channel count.
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Apply temporal attention.

        Args:
            hidden_states: (batch_frames, channels, height, width) — batches of
                frames stacked along dim 0; batch_frames must be divisible by
                ``num_frames``.
            return_dict: When False, return a plain ``(sample,)`` tuple.

        Returns:
            TransformerTemporalModelOutput (or tuple) with the residual-added
            result, same shape as the input.
        """
        # 1. Reshape so each spatial location becomes a length-num_frames sequence.
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Transformer blocks over the temporal axis.
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Project back and restore the (batch_frames, C, H, W) layout.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
74
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class lowercase_(PretrainedConfig):
    """Configuration for a YOLOS object-detection model.

    Defaults are similar to ``hustvl/yolos-base``. Loss-related parameters
    (matcher costs, loss coefficients) mirror the DETR-style training setup.
    """

    # Read by the auto-config machinery.
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Image / patch embedding parameters.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher costs.
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients.
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class lowercase_a(OnnxConfig):
    """ONNX export configuration for YOLOS models."""

    # Minimum torch version that exports this architecture correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification of the model inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
41
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code

# All paths are set with the intent you should run this script from the root of
# the repo with the command: python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# Make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    """Return True while `line` is still part of a body indented at `indent`."""
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Return the source of `object_name` (dotted path) from the diffusers tree.

    Raises:
        ValueError: if the path does not start with a diffusers module or the
            object cannot be located in that module.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func; now find the end (when the
    # indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    return "".join(lines[start_index:line_index])


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Format `code` with black, preserving any leading indentation.

    Indented code is temporarily wrapped in a dummy class so black accepts it.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """Check the `# Copied from` comments in `filename`.

    Returns a list of ``[object_name, start_line]`` inconsistencies; when
    `overwrite` is True, also rewrites the file with the corrected copies.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop over the observed code; stop when indentation diminishes or at
        # an `# End copy` comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code = "".join(lines[start_index:line_index])

        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = "\n".join(
            line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None
        )

        # Before comparing, apply the `replace_pattern` to the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To do that we need the header (class or
        # function definition) from the previous line.
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite=False):
    """Check every file under DIFFUSERS_PATH; raise on inconsistencies unless overwriting."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
74
0
"""Convert PoolFormer checkpoints from the original repository to HuggingFace format."""

import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename `original_name` inside `key` to `new_name`, shifting the block index down by `offset`."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    return key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )


def rename_keys(state_dict):
    """Map original PoolFormer state-dict keys onto the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers.
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert a PoolFormer checkpoint, verify its logits, and save it with its processor."""
    config = PoolFormerConfig()

    # Set attributes based on model_name.
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # Set config attributes.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # Load image processor and prepare a verification image.
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # Load the original state dict and rename its keys.
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    state_dict = rename_keys(state_dict)

    # Create the HuggingFace model and load the state dict.
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Forward pass.
    outputs = model(pixel_values)
    logits = outputs.logits

    # Expected logit slices for the supported model sizes.
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # Verify logits.
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # Finally, save the model and image processor.
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
42
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline."""

    def tearDown(self):
        # Free memory between tests.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """Default PNDM scheduler: check the output slice of a bf16 run."""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """DPM-Solver multistep scheduler: check the output slice of a bf16 run."""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
74
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer: a SentencePiece model combined with a reduced
    monolingual (fairseq-style) vocabulary loaded from ``dict.txt``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab.
        # Keep order of special tokens for backward compatibility.
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARTpho does not use token types; return an all-zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to its id via the reduced fairseq vocab (unk if absent)."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Map an id back to its token via the reduced fairseq vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece tokens back into a plain string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model and monolingual dict into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                fi.write(self.sp_model.serialized_model_proto())

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{token} \n")

        return out_vocab_file, out_monolingual_vocab_file
43
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import map: submodule name -> public names it exports.
# It MUST be named `_import_structure`: that exact name is handed to
# `_LazyModule` at the bottom of this file (the original bound the dict to a
# throwaway name and then referenced the undefined `_import_structure`, which
# raises NameError on import).
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

# Optional-dependency guarded exports.  Each `else:` branch must REGISTER the
# extra submodules in `_import_structure`; the original rebound an unrelated
# variable instead, so these exports were silently dropped.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports for type checkers only.  Module and class names are made
    # consistent with the `_import_structure` keys/strings above: the original
    # mixed `LayoutLMva`/`layoutlmva` spellings with the `LayoutLMv2` strings,
    # so the lazy attribute lookups and the typed imports disagreed.
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy as THIS module so submodules are imported on first
    # attribute access.  The original assigned the proxy to a local variable,
    # leaving the real module in sys.modules and the proxy unused.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
74
0
# NOTE(review): obfuscated copy of the XLM model test module: a model-tester
# helper class (config/input builders plus per-head `create_and_check_*`
# methods), a common test class mixing in ModelTesterMixin/GenerationTesterMixin/
# PipelineTesterMixin, and a slow integration test generating from
# "xlm-mlm-en-2048".  Physical lines are collapsed and class boundaries fall
# mid-line, so the code is left byte-identical — comments only.
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase__ : def __init__( self : str,__A : int,__A : Union[str, Any]=1_3,__A : Optional[int]=7,__A : Dict=True,__A : Dict=True,__A : Optional[int]=True,__A : List[str]=True,__A : int=True,__A : int=False,__A : List[Any]=False,__A : Union[str, Any]=False,__A : Union[str, Any]=2,__A : str=9_9,__A : List[str]=0,__A : Any=3_2,__A : Optional[Any]=5,__A : Union[str, Any]=4,__A : List[Any]=0.1,__A : Tuple=0.1,__A : Dict=5_1_2,__A : Optional[int]=2,__A : List[str]=0.02,__A : Tuple=2,__A : Optional[int]=4,__A : List[Any]="last",__A : Optional[Any]=True,__A : Any=None,__A : Optional[Any]=0,): _lowerCamelCase : Any = parent _lowerCamelCase : Union[str, Any] = batch_size _lowerCamelCase : int = seq_length _lowerCamelCase : Optional[int] = is_training _lowerCamelCase : Any = use_input_lengths _lowerCamelCase : Any = use_token_type_ids _lowerCamelCase : Optional[Any] = use_labels _lowerCamelCase : Union[str, Any] = gelu_activation _lowerCamelCase : Optional[Any] = sinusoidal_embeddings _lowerCamelCase : Optional[int] = causal _lowerCamelCase : str = asm _lowerCamelCase : Optional[int] = n_langs _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Union[str, Any] = n_special _lowerCamelCase : Any = hidden_size _lowerCamelCase : Any = 
num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Dict = attention_probs_dropout_prob _lowerCamelCase : Dict = max_position_embeddings _lowerCamelCase : Tuple = type_sequence_label_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Tuple = num_labels _lowerCamelCase : Any = num_choices _lowerCamelCase : List[str] = summary_type _lowerCamelCase : Union[str, Any] = use_proj _lowerCamelCase : Dict = scope _lowerCamelCase : List[Any] = bos_token_id def lowerCamelCase_ ( self : List[Any] ): _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) _lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Tuple = None if self.use_input_lengths: _lowerCamelCase : Optional[Any] = ( ids_tensor([self.batch_size],vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _lowerCamelCase : Union[str, Any] = None if self.use_token_type_ids: _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length],self.n_langs ) _lowerCamelCase : Optional[int] = None _lowerCamelCase : Dict = None _lowerCamelCase : int = None if self.use_labels: _lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size ) _lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels ) _lowerCamelCase : int = ids_tensor([self.batch_size],2 ).float() _lowerCamelCase : Dict = ids_tensor([self.batch_size],self.num_choices ) _lowerCamelCase : List[str] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCamelCase_ ( self : Union[str, Any] ): return XLMConfig( 
vocab_size=self.vocab_size,n_special=self.n_special,emb_dim=self.hidden_size,n_layers=self.num_hidden_layers,n_heads=self.num_attention_heads,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,gelu_activation=self.gelu_activation,sinusoidal_embeddings=self.sinusoidal_embeddings,asm=self.asm,causal=self.causal,n_langs=self.n_langs,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,summary_type=self.summary_type,use_proj=self.use_proj,num_labels=self.num_labels,bos_token_id=self.bos_token_id,) def lowerCamelCase_ ( self : Union[str, Any],__A : Optional[int],__A : Optional[int],__A : int,__A : List[Any],__A : List[str],__A : Optional[Any],__A : Optional[int],__A : Union[str, Any],__A : List[Any],): _lowerCamelCase : List[Any] = XLMModel(config=__A ) model.to(__A ) model.eval() _lowerCamelCase : int = model(__A,lengths=__A,langs=__A ) _lowerCamelCase : str = model(__A,langs=__A ) _lowerCamelCase : str = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Dict,__A : Optional[int],__A : int,__A : Optional[int],__A : Union[str, Any],__A : Union[str, Any],__A : int,__A : Optional[int],__A : int,__A : List[str],): _lowerCamelCase : List[str] = XLMWithLMHeadModel(__A ) model.to(__A ) model.eval() _lowerCamelCase : List[Any] = model(__A,token_type_ids=__A,labels=__A ) self.parent.assertEqual(result.loss.shape,() ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Optional[int],__A : int,__A : List[Any],__A : Optional[int],__A : Union[str, Any],__A : str,__A : Tuple,__A : List[Any],__A : str,__A : List[Any],): _lowerCamelCase : List[str] = XLMForQuestionAnsweringSimple(__A ) model.to(__A ) model.eval() _lowerCamelCase : Dict = model(__A ) _lowerCamelCase : str = model(__A,start_positions=__A,end_positions=__A ) _lowerCamelCase : 
Optional[int] = outputs self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Tuple,__A : int,__A : Union[str, Any],__A : Optional[Any],__A : Optional[int],__A : Optional[Any],__A : Dict,__A : int,__A : Union[str, Any],__A : List[Any],): _lowerCamelCase : List[str] = XLMForQuestionAnswering(__A ) model.to(__A ) model.eval() _lowerCamelCase : Dict = model(__A ) _lowerCamelCase : Dict = model( __A,start_positions=__A,end_positions=__A,cls_index=__A,is_impossible=__A,p_mask=__A,) _lowerCamelCase : str = model( __A,start_positions=__A,end_positions=__A,cls_index=__A,is_impossible=__A,) ((_lowerCamelCase) , ) : int = result_with_labels.to_tuple() _lowerCamelCase : int = model(__A,start_positions=__A,end_positions=__A ) ((_lowerCamelCase) , ) : Tuple = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape,() ) self.parent.assertEqual(result.start_top_log_probs.shape,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape,(self.batch_size,) ) def lowerCamelCase_ ( self : List[Any],__A : Optional[int],__A : str,__A : int,__A : Any,__A : str,__A : str,__A : Tuple,__A : int,__A : List[str],): _lowerCamelCase : Union[str, Any] = XLMForSequenceClassification(__A ) model.to(__A ) model.eval() _lowerCamelCase : Optional[Any] = model(__A ) _lowerCamelCase : Any = model(__A,labels=__A ) self.parent.assertEqual(result.loss.shape,() ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) 
def lowerCamelCase_ ( self : List[Any],__A : Dict,__A : Union[str, Any],__A : List[str],__A : List[str],__A : Optional[int],__A : Optional[Any],__A : List[str],__A : str,__A : Optional[int],): _lowerCamelCase : int = self.num_labels _lowerCamelCase : List[Any] = XLMForTokenClassification(__A ) model.to(__A ) model.eval() _lowerCamelCase : Optional[int] = model(__A,attention_mask=__A,labels=__A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Optional[Any],__A : Dict,__A : Dict,__A : Union[str, Any],__A : Any,__A : int,__A : Tuple,__A : Any,__A : Union[str, Any],__A : List[Any],): _lowerCamelCase : Optional[int] = self.num_choices _lowerCamelCase : List[Any] = XLMForMultipleChoice(config=__A ) model.to(__A ) model.eval() _lowerCamelCase : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous() _lowerCamelCase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous() _lowerCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous() _lowerCamelCase : str = model( __A,attention_mask=__A,token_type_ids=__A,labels=__A,) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : Optional[int] ): _lowerCamelCase : str = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Optional[int] = config_and_inputs _lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class UpperCAmelCase__ ( A , A , A , unittest.TestCase ): lowerCAmelCase_ = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, 
XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) lowerCAmelCase_ = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowerCAmelCase_ = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def lowerCamelCase_ ( self : int,__A : Tuple,__A : Union[str, Any],__A : Optional[int],__A : Union[str, Any],__A : Any ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCamelCase_ ( self : List[Any],__A : List[Any],__A : Dict,__A : Union[str, Any]=False ): _lowerCamelCase : List[str] = super()._prepare_for_class(__A,__A,return_labels=__A ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": _lowerCamelCase : Optional[Any] = torch.zeros( self.model_tester.batch_size,dtype=torch.long,device=__A ) _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size,dtype=torch.long,device=__A ) return inputs_dict def lowerCamelCase_ ( self : str ): _lowerCamelCase : int = XLMModelTester(self ) _lowerCamelCase : Optional[int] = ConfigTester(self,config_class=__A,emb_dim=3_7 ) def lowerCamelCase_ ( self : str ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Union[str, Any] ): _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_xlm_model(*__A ) def lowerCamelCase_ ( self : Union[str, Any] ): _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__A ) def lowerCamelCase_ ( self : int ): _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__A ) def lowerCamelCase_ ( self : List[str] ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__A ) def lowerCamelCase_ ( self : List[str] ): _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__A ) def lowerCamelCase_ ( self : Dict ): _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__A ) def lowerCamelCase_ ( self : List[Any] ): _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__A ) def lowerCamelCase_ ( self : str,__A : Optional[Any],__A : str,__A : List[str],__A : Tuple,__A : Optional[Any],__A : Optional[int]=False,__A : Optional[int]=1 ): self.assertIsInstance(__A,__A ) self.assertListEqual( [isinstance(__A,__A ) for iter_attentions in attentions],[True] * len(__A ) ) self.assertEqual(len(__A ),(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(__A ): # adds PAD dummy token _lowerCamelCase : Optional[Any] = min_length + idx + 1 _lowerCamelCase : int = min_length + idx + 1 _lowerCamelCase : Optional[int] = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions],[expected_shape] * len(__A ) ) def lowerCamelCase_ ( self : Optional[Any],__A : Union[str, Any],__A : Dict,__A : List[Any],__A : Dict,__A : List[str],__A : Union[str, 
Any]=False,__A : Dict=1 ): self.assertIsInstance(__A,__A ) self.assertListEqual( [isinstance(__A,__A ) for iter_hidden_states in hidden_states],[True] * len(__A ),) self.assertEqual(len(__A ),(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(__A ): # adds PAD dummy token _lowerCamelCase : int = min_length + idx + 1 _lowerCamelCase : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],[expected_shape] * len(__A ),) pass @slow def lowerCamelCase_ ( self : Tuple ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = XLMModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_torch class UpperCAmelCase__ ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : str ): _lowerCamelCase : int = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(__A ) _lowerCamelCase : int = torch.tensor([[1_4, 4_4_7]],dtype=torch.long,device=__A ) # the president _lowerCamelCase : Optional[Any] = [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference _lowerCamelCase : Optional[Any] = model.generate(__A,do_sample=__A ) self.assertListEqual(output_ids[0].cpu().numpy().tolist(),__A )
44
# NOTE(review): obfuscated copy of the MobileBERT tokenizer test module.  It
# exercises the slow and fast tokenizers (which reuse BERT's BasicTokenizer /
# WordpieceTokenizer), the character-class predicates (_is_whitespace,
# _is_control, _is_punctuation), offset mapping, special-token building, and
# Chinese-character handling.  Physical lines are collapsed and methods span
# line boundaries, so the code is left byte-identical — comments only.
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', 
'''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', 
'''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) 
self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP 
sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = 
tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
74
0
# Argument dataclasses for the CodeParrot research scripts: training,
# evaluation, human-eval generation, dataset preprocessing, tokenizer
# training, pretokenization and model initialization.
#
# NOTE(review): this file looks machine-mangled. Every class below is named
# `lowerCAmelCase_` (each definition shadows the previous one), every field is
# named `_snake_case` (later assignments overwrite earlier ones within each
# dataclass), and `lowercase`, used as a default value in several fields, is
# not defined in this module — confirm against the upstream source before
# relying on any of these classes at runtime.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class lowerCAmelCase_:
    """simple docstring"""

    # --- Training configuration ---
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Model name or path of model to be trained."""}
    )
    _snake_case: Optional[str] = field(
        default="""./""", metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""}
    )
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot-clean-train""", metadata={"""help""": """Name or path of training dataset."""}
    )
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot-clean-valid""", metadata={"""help""": """Name or path of validation dataset."""}
    )
    _snake_case: Optional[int] = field(default=2, metadata={"""help""": """Batch size for training."""})
    _snake_case: Optional[int] = field(default=2, metadata={"""help""": """Batch size for evaluation."""})
    _snake_case: Optional[float] = field(default=0.1, metadata={"""help""": """Value of weight decay."""})
    _snake_case: Optional[int] = field(
        default=10_000, metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""}
    )
    # NOTE(review): help text typo "fo" (should read "for") left untouched —
    # help strings are user-visible runtime data, not comments.
    _snake_case: Optional[float] = field(default=2e-4, metadata={"""help""": """Learning rate fo training."""})
    # NOTE(review): default "cosine" suggests this is the LR *scheduler type*,
    # but the help text only says "Learning rate." — confirm upstream.
    _snake_case: Optional[str] = field(default="""cosine""", metadata={"""help""": """Learning rate."""})
    _snake_case: Optional[int] = field(
        default=750, metadata={"""help""": """Number of warmup steps in the learning rate schedule."""}
    )
    _snake_case: Optional[int] = field(
        default=16, metadata={"""help""": """Number of gradient accumulation steps."""}
    )
    _snake_case: Optional[bool] = field(
        default=lowercase, metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""}
    )
    _snake_case: Optional[int] = field(default=50_000, metadata={"""help""": """Maximum number of training steps."""})
    _snake_case: Optional[int] = field(
        default=-1, metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""}
    )
    _snake_case: Optional[int] = field(default=1_024, metadata={"""help""": """Sequence lengths used for training."""})
    _snake_case: Optional[int] = field(default=1, metadata={"""help""": """Training seed."""})
    _snake_case: Optional[int] = field(
        default=1_024,
        metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""},
    )
    _snake_case: Optional[str] = field(
        default=lowercase, metadata={"""help""": """States path if the training should continue from a checkpoint folder."""}
    )
    _snake_case: Optional[bool] = field(default=lowercase, metadata={"""help""": """If True the data is pretokenized."""})


@dataclass
class lowerCAmelCase_:
    """simple docstring"""

    # --- Evaluation configuration ---
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Model name or path of model to be evaluated."""}
    )
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot-clean-valid""", metadata={"""help""": """Name or path of validation dataset."""}
    )
    _snake_case: Optional[int] = field(default=2, metadata={"""help""": """Batch size used for evaluation."""})
    _snake_case: Optional[int] = field(
        default=-1, metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""}
    )
    _snake_case: Optional[int] = field(default=1_024, metadata={"""help""": """Length of sequences to be evaluated."""})
    _snake_case: Optional[int] = field(default=1, metadata={"""help""": """Random seed used for evaluation."""})


@dataclass
class lowerCAmelCase_:
    """simple docstring"""

    # --- Human-eval generation configuration ---
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Model name or path of model to be evaluated."""}
    )
    _snake_case: Optional[int] = field(
        default=lowercase, metadata={"""help""": """Number of workers used for code evaluation."""}
    )
    _snake_case: Optional[int] = field(
        default=lowercase,
        metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""},
    )
    _snake_case: Optional[bool] = field(
        default=lowercase, metadata={"""help""": """Sample from the language model's output distribution."""}
    )
    _snake_case: Optional[float] = field(default=0.2, metadata={"""help""": """Sampling temperature used for generation."""})
    _snake_case: Optional[int] = field(default=256, metadata={"""help""": """Maximum number of newly generated tokens."""})
    _snake_case: Optional[int] = field(default=0, metadata={"""help""": """Top-k parameter used for generation."""})
    _snake_case: Optional[float] = field(default=0.95, metadata={"""help""": """Top-p parameter used for nucleus sampling."""})
    _snake_case: Optional[int] = field(default=10, metadata={"""help""": """Number of generations to run in parallel."""})
    _snake_case: Optional[int] = field(
        default=200, metadata={"""help""": """Number of completions to generate for each sample."""}
    )
    _snake_case: Optional[int] = field(default=1, metadata={"""help""": """Random seed used for evaluation."""})
    # NOTE(review): the help text below appears copy-pasted from the seed
    # field; the default "eval_results.json" suggests this is actually the
    # output file for evaluation results — confirm upstream.
    _snake_case: Optional[str] = field(
        default="""eval_results.json""", metadata={"""help""": """Random seed used for evaluation."""}
    )
    _snake_case: Optional[str] = field(
        default="""0""", metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""}
    )
    _snake_case: Optional[int] = field(
        default=-1,
        metadata={
            """help""": (
                """Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
                """ number corresponds to which GPU device id to run on."""
            )
        },
    )


@dataclass
class lowerCAmelCase_:
    """simple docstring"""

    # --- Dataset preprocessing / filtering configuration ---
    _snake_case: Optional[int] = field(
        default=lowercase,
        metadata={
            """help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
        },
    )
    _snake_case: Optional[str] = field(
        default="""transformersbook/codeparrot""", metadata={"""help""": """Folder or name of dataset to process."""}
    )
    # NOTE(review): "processed processed" duplication is in the runtime help
    # string and therefore left untouched.
    _snake_case: Optional[str] = field(
        default="""codeparrot-clean""", metadata={"""help""": """Folder to save processed processed dataset."""}
    )
    _snake_case: Optional[int] = field(
        default=100_000, metadata={"""help""": """Number of files to save per JSON output file."""}
    )
    _snake_case: Optional[str] = field(
        default="""content""", metadata={"""help""": """Column containing text data to process."""}
    )
    _snake_case: Optional[float] = field(
        default=1_000, metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""}
    )
    _snake_case: Optional[float] = field(
        default=100, metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""}
    )
    _snake_case: Optional[float] = field(
        default=0.25, metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""}
    )
    _snake_case: Optional[float] = field(
        default=1.5, metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""}
    )
    _snake_case: Optional[float] = field(
        default=0.7, metadata={"""help""": """Probability for filtering config, test and uncommon files."""}
    )
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot""",
        metadata={"""help""": """Name or path to the tokenizer."""},
    )
    _snake_case: Optional[bool] = field(
        default=lowercase, metadata={"""help""": """If True, near-duplicate samples are removed."""}
    )
    _snake_case: Optional[float] = field(
        default=0.85, metadata={"""help""": """Jaccard threshold for near-duplicate samples."""}
    )


@dataclass
class lowerCAmelCase_:
    """simple docstring"""

    # --- Tokenizer training configuration ---
    _snake_case: Optional[str] = field(
        default="""gpt2""", metadata={"""help""": """Base tokenizer to build new tokenizer from."""}
    )
    _snake_case: Optional[str] = field(
        default="""transformersbook/codeparrot-train""", metadata={"""help""": """Dataset to train tokenizer on."""}
    )
    _snake_case: Optional[str] = field(
        default="""content""", metadata={"""help""": """Column containing text data to process."""}
    )
    _snake_case: Optional[int] = field(
        default=200_000, metadata={"""help""": """Number of examples to train tokenizer on."""}
    )
    # NOTE(review): default 32_768 and position suggest this is the vocabulary
    # size; the help text repeats the previous field's description — confirm.
    _snake_case: Optional[int] = field(
        default=32_768, metadata={"""help""": """Number of examples to train the tokenizer on."""}
    )
    _snake_case: Optional[str] = field(default="""codeparrot""", metadata={"""help""": """Name of new tokenizer."""})
    _snake_case: Optional[bool] = field(default=lowercase, metadata={"""help""": """Push saved tokenizer to the hub."""})


@dataclass
class lowerCAmelCase_:
    """simple docstring"""

    # --- Pretokenization configuration ---
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Name or path to the tokenizer."""}
    )
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot-clean-train""", metadata={"""help""": """Name or path to the dataset to pretokenize."""}
    )
    _snake_case: Optional[str] = field(
        default="""tokenized-codeparrot-train""", metadata={"""help""": """Repo name of the pretokenized data."""}
    )
    _snake_case: Optional[int] = field(
        default=lowercase, metadata={"""help""": """Number of workers used for code evaluation."""}
    )


@dataclass
class lowerCAmelCase_:
    """simple docstring"""

    # --- Model initialization configuration ---
    _snake_case: Optional[str] = field(
        default="""gpt2-large""",
        metadata={"""help""": """Configuration to use for model initialization."""},
    )
    _snake_case: Optional[str] = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Tokenizer attached to model."""}
    )
    _snake_case: Optional[str] = field(default="""codeparrot""", metadata={"""help""": """Name of the created model."""})
    _snake_case: Optional[bool] = field(default=lowercase, metadata={"""help""": """Push saved tokenizer to the hub."""})
45
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


lowercase_ = logging.get_logger(__name__)


class __UpperCamelCase(MobileViTImageProcessor):
    """Deprecated feature-extractor alias for MobileViT.

    Kept only for backward compatibility: it behaves exactly like
    ``MobileViTImageProcessor`` (the deprecation message names it as the
    replacement) and emits a ``FutureWarning`` when constructed.

    Fixes over the previous revision: the constructor declared two
    parameters with the same name (``*_A`` and ``**_A``), which is a
    SyntaxError, and the warning category argument was lost.
    """

    def __init__(self, *args, **kwargs):
        # Warn on every construction so callers migrate before v5 removes it.
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
74
0
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError("check_bouncy() accepts only integer arguments" ) _lowerCamelCase : Any = str(_lowerCamelCase ) _lowerCamelCase : List[str] = "".join(sorted(_lowerCamelCase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def lowerCamelCase_( _lowerCamelCase = 99 ) -> int: '''simple docstring''' if not 0 < percent < 100: raise ValueError("solution() only accepts values from 0 to 100" ) _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : List[Any] = 1 while True: if check_bouncy(_lowerCamelCase ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(99)}''')
46
# Parquet loader for the `datasets` library: a BuilderConfig dataclass plus an
# ArrowBasedBuilder that streams pyarrow record batches out of parquet files.
#
# NOTE(review): this file looks machine-mangled — both classes are named
# `__UpperCamelCase` (the second shadows the first), every method shares the
# name `UpperCAmelCase__` (so only the last definition survives on the class),
# locals are all assigned to `__SCREAMING_SNAKE_CASE`, and several referenced
# names (`ParquetConfig`, `dl_manager`, `data_files`, `files`, `splits`,
# `pa_table`, `schema`, `parquet_file`, `logger`, `self._cast_table`) are not
# defined under those names here. Confirm against the upstream source before
# relying on this module at runtime.
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


lowercase_ = datasets.utils.logging.get_logger(__name__)


@dataclass
class __UpperCamelCase(datasets.BuilderConfig):
    """simple docstring"""

    # Rows per yielded Arrow batch, optional column projection, optional
    # explicit Features (in that order, judging by later config accesses
    # `batch_size`, `columns`, `features`) — TODO confirm.
    lowerCAmelCase_ = 1_00_00
    lowerCAmelCase_ = None
    lowerCAmelCase_ = None


class __UpperCamelCase(datasets.ArrowBasedBuilder):
    """simple docstring"""

    lowerCAmelCase_ = ParquetConfig

    def UpperCAmelCase__(self: Any):
        """simple docstring"""
        # Dataset metadata comes straight from the configured features.
        return datasets.DatasetInfo(features=self.config.features)

    def UpperCAmelCase__(self: Any, _A: Optional[Any]):
        """simple docstring"""
        # `_A` presumably is the DownloadManager — TODO confirm.
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        __SCREAMING_SNAKE_CASE: List[str] = dl_manager.download_and_extract(self.config.data_files)
        # A bare str/list/tuple of files means a single unnamed split: TRAIN.
        if isinstance(_A, (str, list, tuple)):
            __SCREAMING_SNAKE_CASE: Tuple = data_files
            if isinstance(_A, _A):
                __SCREAMING_SNAKE_CASE: Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE: List[Any] = [dl_manager.iter_files(_A) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''files''': files})]
        # Otherwise data_files maps split names to file lists.
        __SCREAMING_SNAKE_CASE: int = []
        for split_name, files in data_files.items():
            if isinstance(_A, _A):
                __SCREAMING_SNAKE_CASE: Any = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE: Optional[int] = [dl_manager.iter_files(_A) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(_A):
                    with open(_A, '''rb''') as f:
                        __SCREAMING_SNAKE_CASE: Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A))
                    break
            splits.append(datasets.SplitGenerator(name=_A, gen_kwargs={'''files''': files}))
        return splits

    def UpperCAmelCase__(self: str, _A: pa.Table):
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __SCREAMING_SNAKE_CASE: str = table_cast(_A, self.info.features.arrow_schema)
        return pa_table

    def UpperCAmelCase__(self: Tuple, _A: str):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE: Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
        # Column projection must agree with the declared features.
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\''''
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(_A)):
            with open(_A, '''rb''') as f:
                __SCREAMING_SNAKE_CASE: str = pq.ParquetFile(_A)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        __SCREAMING_SNAKE_CASE: Optional[Any] = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A)
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(_A)}: {e}''')
                    raise
74
0
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Print the sequence of moves that transfers ``height`` disks from
    ``from_pole`` to ``to_pole``, using ``with_pole`` as the spare pole.

    Fixes over the previous revision: every parameter was mangled to the same
    name (a SyntaxError) and the call sites referenced the undefined names
    ``move_tower``/``move_disk``/``main``.
    """
    if height >= 1:
        # Park the top height-1 disks on the spare pole, move the largest
        # disk directly, then bring the parked disks back on top of it.
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    """Print a single disk move from pole ``fp`` to pole ``tp``."""
    print('moving disk from', fp, 'to', tp)


def main() -> None:
    """Read the tower height from stdin and print the full solution."""
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
47
"""Project Euler problem 144: count the reflections of a laser beam inside
the white-cell ellipse 4x^2 + y^2 = 100 before it escapes through the top
aperture (-0.01 <= x <= 0.01, y > 0).

Fixes over the previous revision: both functions were defined under the same
mangled name ``a__`` while the call sites referenced the undefined names
``next_point`` and ``solution``.
"""
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Given the current impact point and the incoming beam gradient, return
    (next_x, next_y, outgoing_gradient) for the next reflection.
    """
    # The normal at (x, y) on the ellipse 4x^2 + y^2 = 100 has gradient y / (4x).
    normal_gradient = point_y / 4 / point_x
    # Rotate the incoming gradient by twice the normal's angle (reflection):
    # s2 = sin(2a), c2 = cos(2a) where tan(a) = normal_gradient.
    s_2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c_2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s_2 - c_2 * incoming_gradient) / (c_2 + s_2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # Two solutions, one of which is the current point; keep the other one.
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Return the number of reflections before the beam exits the aperture.

    The beam enters at (0.0, 10.1) and first strikes the ellipse at
    (first_x_coord, first_y_coord).
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # Gradient of the entry segment from (0.0, 10.1) to the first impact.
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
74
0
'''simple docstring'''
# Unit tests for accelerate's memory utilities (`find_executable_batch_size`
# halving search and `release_memory`).
#
# NOTE(review): this file looks machine-mangled — the OOM helper, the model
# and the TestCase are all named `A` (each definition shadows the previous),
# locals are all bound to `lowerCAmelCase__`, all test methods share the name
# `__SCREAMING_SNAKE_CASE` (only the last survives on the class), and several
# referenced names (`raise_fake_out_of_memory`, `ModelForTest`, `batch_size`,
# `batch_sizes`, `arga`, `bs`, `model`, `List`, `Dict`, `Tuple`, `Optional`,
# `Union`, `nn.BatchNormad` — presumably `nn.BatchNorm1d`) are not defined
# under those names here. Confirm against upstream before running.
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def A() -> List[Any]:
    """Simulate a CUDA out-of-memory failure."""
    raise RuntimeError("CUDA out of memory.")


class A(nn.Module):
    # Tiny linear -> batchnorm -> linear model used by the CUDA memory test.
    def __init__(self: Dict):
        """simple docstring"""
        super().__init__()
        lowerCAmelCase__ = nn.Linear(3, 4)
        lowerCAmelCase__ = nn.BatchNormad(4)
        lowerCAmelCase__ = nn.Linear(4, 5)

    def __SCREAMING_SNAKE_CASE(self: List[Any], __magic_name__: int):
        """simple docstring"""
        # NOTE(review): reads attributes (`lineara`, `batchnorm`) that the
        # mangled __init__ above never assigns — confirm upstream.
        return self.lineara(self.batchnorm(self.lineara(__magic_name__)))


class A(unittest.TestCase):
    def __SCREAMING_SNAKE_CASE(self: Dict):
        """Batch size should halve from 128 down to 8 after repeated fake OOMs."""
        lowerCAmelCase__ = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(__magic_name__: Optional[int]):
            nonlocal batch_sizes
            batch_sizes.append(__magic_name__)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(__magic_name__, [128, 64, 32, 16, 8])

    def __SCREAMING_SNAKE_CASE(self: Union[str, Any]):
        """Same halving search, but the wrapped function also returns values."""
        lowerCAmelCase__ = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(__magic_name__: Tuple, __magic_name__: Tuple):
            nonlocal batch_sizes
            batch_sizes.append(__magic_name__)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        lowerCAmelCase__, lowerCAmelCase__ = mock_training_loop_function("hello")
        self.assertListEqual(__magic_name__, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arga], [8, "hello"])

    def __SCREAMING_SNAKE_CASE(self: Dict):
        """Starting the search at batch size 0 should raise immediately."""

        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(__magic_name__: Optional[int]):
            pass

        with self.assertRaises(__magic_name__) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def __SCREAMING_SNAKE_CASE(self: Any):
        """If every batch size OOMs, the search must bottom out at zero."""

        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(__magic_name__: Optional[Any]):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(__magic_name__) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def __SCREAMING_SNAKE_CASE(self: List[str]):
        """Error messages should surface the arguments passed to the wrapped f."""

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(__magic_name__: Tuple, __magic_name__: int, __magic_name__: Union[str, Any]):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(__magic_name__) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def __SCREAMING_SNAKE_CASE(self: Union[str, Any]):
        """Non-OOM exceptions must propagate unchanged."""

        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(__magic_name__: Optional[Any]):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(__magic_name__) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def __SCREAMING_SNAKE_CASE(self: List[str]):
        """release_memory should return CUDA memory to the pre-allocation level."""
        lowerCAmelCase__ = torch.cuda.memory_allocated()
        lowerCAmelCase__ = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), __magic_name__)
        lowerCAmelCase__ = release_memory(__magic_name__)
        self.assertEqual(torch.cuda.memory_allocated(), __magic_name__)
48
# Tests for the MobileNetV1 image processor: a tester class that supplies the
# processor config plus random inputs, and the mixin-based test case that
# checks properties and PIL / numpy / torch input handling.
#
# NOTE(review): this file looks machine-mangled — both classes are named
# `__UpperCamelCase`, methods share the name `UpperCAmelCase__` (only the last
# definition survives on each class), `__init__` declares many parameters with
# the same name `_A` (a SyntaxError), and several referenced names
# (`MobileNetVaImageProcessingTester`, `lowerCAmelCase__` as a base class,
# `size`, `crop_size`, `parent`, `image_processor`, `image_processing`,
# `image_inputs`, `encoded_images`) are not defined under those names here.
# Confirm against upstream before running.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class __UpperCamelCase(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self: Any,
        _A: int,
        _A: Any = 7,
        _A: List[str] = 3,
        _A: Optional[Any] = 18,
        _A: List[str] = 30,
        _A: Optional[Any] = 400,
        _A: Any = True,
        _A: List[str] = None,
        _A: Union[str, Any] = True,
        _A: Optional[int] = None,
    ):
        """simple docstring"""
        # Fall back to the standard defaults when size/crop_size are omitted.
        __SCREAMING_SNAKE_CASE: List[Any] = size if size is not None else {'''shortest_edge''': 20}
        __SCREAMING_SNAKE_CASE: List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        __SCREAMING_SNAKE_CASE: int = parent
        __SCREAMING_SNAKE_CASE: Optional[int] = batch_size
        __SCREAMING_SNAKE_CASE: Optional[Any] = num_channels
        __SCREAMING_SNAKE_CASE: List[str] = image_size
        __SCREAMING_SNAKE_CASE: int = min_resolution
        __SCREAMING_SNAKE_CASE: Optional[int] = max_resolution
        __SCREAMING_SNAKE_CASE: List[Any] = do_resize
        __SCREAMING_SNAKE_CASE: Union[str, Any] = size
        __SCREAMING_SNAKE_CASE: str = do_center_crop
        __SCREAMING_SNAKE_CASE: Any = crop_size

    def UpperCAmelCase__(self: Dict):
        """simple docstring"""
        # Keyword arguments consumed by image_processing_class(**dict).
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class __UpperCamelCase(lowerCAmelCase__, unittest.TestCase):
    """simple docstring"""

    lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None

    def UpperCAmelCase__(self: Optional[Any]):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE: str = MobileNetVaImageProcessingTester(self)

    @property
    def UpperCAmelCase__(self: Union[str, Any]):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__(self: Union[str, Any]):
        """The processor must expose the four standard config attributes."""
        __SCREAMING_SNAKE_CASE: Dict = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(_A, '''do_resize'''))
        self.assertTrue(hasattr(_A, '''size'''))
        self.assertTrue(hasattr(_A, '''do_center_crop'''))
        self.assertTrue(hasattr(_A, '''crop_size'''))

    def UpperCAmelCase__(self: Optional[Any]):
        """from_dict should honour both defaults and explicit size overrides."""
        __SCREAMING_SNAKE_CASE: List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 20})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})
        __SCREAMING_SNAKE_CASE: Tuple = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84
        )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})

    def UpperCAmelCase__(self: int):
        """simple docstring"""
        pass

    def UpperCAmelCase__(self: Dict):
        """PIL inputs: single image and batch should yield NCHW crops."""
        __SCREAMING_SNAKE_CASE: str = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __SCREAMING_SNAKE_CASE: Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=_A)
        for image in image_inputs:
            self.assertIsInstance(_A, Image.Image)
        # Test not batched input
        __SCREAMING_SNAKE_CASE: Dict = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
        # Test batched
        __SCREAMING_SNAKE_CASE: List[Any] = image_processing(_A, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def UpperCAmelCase__(self: Optional[Any]):
        """numpy inputs: single image and batch should yield NCHW crops."""
        __SCREAMING_SNAKE_CASE: int = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __SCREAMING_SNAKE_CASE: Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=_A, numpify=_A)
        for image in image_inputs:
            self.assertIsInstance(_A, np.ndarray)
        # Test not batched input
        __SCREAMING_SNAKE_CASE: List[str] = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
        # Test batched
        __SCREAMING_SNAKE_CASE: Any = image_processing(_A, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def UpperCAmelCase__(self: Optional[Any]):
        """torch inputs: single image and batch should yield NCHW crops."""
        __SCREAMING_SNAKE_CASE: Any = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __SCREAMING_SNAKE_CASE: Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_A, torchify=_A)
        for image in image_inputs:
            self.assertIsInstance(_A, torch.Tensor)
        # Test not batched input
        __SCREAMING_SNAKE_CASE: int = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
        # Test batched
        __SCREAMING_SNAKE_CASE: Dict = image_processing(_A, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
74
0
"""simple docstring""" def lowercase__ ( snake_case_ :int = 50_000_000 ): __UpperCAmelCase = set() __UpperCAmelCase = int((limit - 24) ** (1 / 2) ) __UpperCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , snake_case_ ) ) ) for primea in primes: __UpperCAmelCase = primea * primea for primea in primes: __UpperCAmelCase = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: __UpperCAmelCase = primea * primea * primea * primea __UpperCAmelCase = square + cube + tetr if total >= limit: break ret.add(snake_case_ ) return len(snake_case_ ) if __name__ == "__main__": print(f"""{solution() = }""")
49
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array of ``input_str`` in O(n).

    For each index i > 0, z_result[i] is the length of the longest substring
    starting at i that is also a prefix of ``input_str`` (z_result[0] is 0).

    Fixes over the previous revision: all three functions were defined under
    the same mangled name ``a__`` while their bodies referenced the undefined
    names ``z_function`` and ``go_next``.
    """
    z_result = [0 for _ in range(len(input_str))]
    # [left_pointer, right_pointer] is the rightmost window known to match a prefix.
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        # Extend the match character by character past the known window.
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Return True while the extension at index i still matches the prefix."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count (possibly overlapping) occurrences of ``pattern`` in ``input_str``.

    Uses the Z-array of ``pattern + input_str``: any Z-value of at least
    ``len(pattern)`` marks an occurrence.
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than the pattern length, this index starts a
        # substring equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
74
0
"""Lazy import structure for the Perceiver model sub-package (configuration,
tokenizer, vision processors and the PyTorch modeling objects), following the
standard Transformers lazy-module layout.

Fixes over the previous revision: the import-structure dict and the optional
object lists were bound to throwaway names while ``_import_structure`` —
the name actually passed to ``_LazyModule`` — was never defined, and the
``_LazyModule`` instance was assigned to a variable instead of being
installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Objects importable without any optional dependency.
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}

# Vision-dependent objects.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']

# Torch-dependent objects.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_perceiver'] = [
        'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PerceiverForImageClassificationConvProcessing',
        'PerceiverForImageClassificationFourier',
        'PerceiverForImageClassificationLearned',
        'PerceiverForMaskedLM',
        'PerceiverForMultimodalAutoencoding',
        'PerceiverForOpticalFlow',
        'PerceiverForSequenceClassification',
        'PerceiverLayer',
        'PerceiverModel',
        'PerceiverPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
0
'''simple docstring''' def __snake_case ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] ) -> None: """simple docstring""" UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) print('''The following activities are selected:''' ) # The first activity is always selected UpperCAmelCase = 0 print(SCREAMING_SNAKE_CASE_ , end=''',''' ) # Consider rest of the activities for j in range(SCREAMING_SNAKE_CASE_ ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(SCREAMING_SNAKE_CASE_ , end=''',''' ) UpperCAmelCase = j if __name__ == "__main__": import doctest doctest.testmod() a__ : Dict = [1, 3, 0, 5, 8, 5] a__ : int = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
51
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in 
name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if 
"prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] 
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', 
'''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == 
"xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to 
push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
0
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class __lowercase ( _UpperCamelCase ): '''simple docstring''' def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ): if tokenize_kwargs is None: __a : Dict = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __a : Tuple = truncation __a : List[Any] = tokenize_kwargs __a : List[str] = {} if return_tensors is not None: __a : Optional[Any] = return_tensors return preprocess_params, {}, postprocess_params def _lowerCamelCase ( self , _UpperCAmelCase , **_UpperCAmelCase ): __a : Optional[Any] = self.framework __a : Optional[Any] = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) return model_inputs def _lowerCamelCase ( self , _UpperCAmelCase ): __a : int = self.model(**_UpperCAmelCase ) return model_outputs def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=False ): # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ): return super().__call__(*_UpperCAmelCase , **_UpperCAmelCase )
52
from pathlib import Path import fire def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = Path(snake_case ) __SCREAMING_SNAKE_CASE : Dict = Path(snake_case ) dest_dir.mkdir(exist_ok=snake_case ) for path in src_dir.iterdir(): __SCREAMING_SNAKE_CASE : Union[str, Any] = [x.rstrip() for x in list(path.open().readlines() )][:n] __SCREAMING_SNAKE_CASE : Tuple = dest_dir.joinpath(path.name ) print(snake_case ) dest_path.open('''w''' ).write('''\n'''.join(snake_case ) ) if __name__ == "__main__": fire.Fire(minify)
74
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Tuple = logging.get_logger(__name__) _snake_case : List[Any] = { 'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json', } class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" a_ = """git_vision_model""" def __init__( self : Dict , lowerCAmelCase_ : int=7_6_8 , lowerCAmelCase_ : int=3_0_7_2 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : int=1_2 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : int=2_2_4 , lowerCAmelCase_ : int=1_6 , lowerCAmelCase_ : Tuple="quick_gelu" , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : Optional[int]=0.02 , **lowerCAmelCase_ : List[str] , ) -> Union[str, Any]: super().__init__(**lowerCAmelCase_ ) __lowerCAmelCase = hidden_size __lowerCAmelCase = intermediate_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = num_channels __lowerCAmelCase = patch_size __lowerCAmelCase = image_size __lowerCAmelCase = initializer_range __lowerCAmelCase = attention_dropout __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = hidden_act @classmethod def lowercase ( cls : Any , lowerCAmelCase_ : Union[str, os.PathLike] , **lowerCAmelCase_ : Union[str, Any] ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowerCAmelCase_ ) __lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) # get the vision config dict if we are loading from GITConfig if config_dict.get('model_type' ) == "git": __lowerCAmelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" a_ = """git""" def __init__( self : Optional[int] , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]=3_0_5_2_2 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : str=6 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : str=3_0_7_2 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[Any]=1_0_2_4 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : List[str]=1e-12 , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : Any="absolute" , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=1_0_1 , lowerCAmelCase_ : Union[str, Any]=1_0_2 , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Dict , ) -> List[Any]: super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) if vision_config is None: __lowerCAmelCase = {} logger.info('vision_config is None. initializing the GitVisionConfig with default values.' 
) __lowerCAmelCase = GitVisionConfig(**lowerCAmelCase_ ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = tie_word_embeddings __lowerCAmelCase = num_image_with_embedding __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id def lowercase ( self : List[str] ) -> List[Any]: __lowerCAmelCase = copy.deepcopy(self.__dict__ ) __lowerCAmelCase = self.vision_config.to_dict() __lowerCAmelCase = self.__class__.model_type return output
53
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
74
0
import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device __lowercase : Optional[Any] =False class A ( unittest.TestCase ): pass @slow @require_torch_gpu class A ( unittest.TestCase ): def lowerCAmelCase__ ( self: Tuple ) -> Any: '''simple docstring''' UpperCAmelCase_ =VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) UpperCAmelCase_ =load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) UpperCAmelCase_ =torch.manual_seed(0 ) UpperCAmelCase_ =pipe( image=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images UpperCAmelCase_ =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ =np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
54
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."
        },
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect any locally provided folders into the ``data_files`` mapping
        # expected by ``datasets.load_dataset``; ``None`` means "use the hub dataset".
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={
            "help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"
        },
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generate random boolean masks for SimMIM-style masked image modeling.

    A mask is a 1D tensor of 0/1 values (1 = masked) with one entry per model
    patch; masking decisions are made at ``mask_patch_size`` granularity and
    then upsampled to ``model_patch_size`` granularity.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        # Number of mask patches along one side, and upsampling factor to model patches.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick ``mask_count`` distinct mask-patch positions uniformly at random.
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        # Upsample from mask-patch resolution to model-patch resolution.
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    """Stack per-example tensors into the batch dict the model expects."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to
    # maintain them. The information sent is the one passed as arguments along with your
    # Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info
        # here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config: CLI values win over the checkpoint's configuration.
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        # Apply the SimMIM transforms and attach a freshly sampled mask per image.
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
74
0
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build the UperNet+ConvNext config matching the given mmsegmentation checkpoint name."""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information (ADE20k semantic segmentation labels)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Fix: keys must be converted from the loop variable ``k`` (JSON keys are strings).
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    # Fix: the label mappings must be passed as ``id2label``/``label2id``.
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping mmsegmentation names to HF names."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmsegmentation UperNet-ConvNext checkpoint, convert it to the HF
    format, verify the logits on a fixture image, and optionally save/push it."""
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
55
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration class for a Data2Vec vision model.

    Stores the hyper-parameters used to instantiate the model; all arguments
    default to the base architecture's values. Attributes are stored on the
    instance so ``PretrainedConfig`` serialization works.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec vision models."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel-values input with fully dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-4
74
0
'''simple docstring''' def _a (lowercase__ : float , lowercase__ : float ) -> float: """simple docstring""" if mass < 0: raise ValueError('The mass of a body cannot be negative' ) return 0.5 * mass * abs(lowercase__ ) * abs(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
56
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester:
    """Builds tiny DistilBert configs/inputs and checks each model head's output shapes.

    NOTE(review): attribute, parameter and method names were reconstructed from the
    right-hand sides of the original (broken) local assignments and from the call
    sites below (e.g. ``create_and_check_distilbert_model``); order and default
    values are unchanged.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # The original obfuscated code assigned every parameter to a throwaway
        # local; later methods read `self.batch_size` etc., so these must be
        # instance attributes.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a config plus random input ids/mask/labels sized by the tester."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # DistilBert uses `dim`/`n_layers`/`n_heads`/`hidden_dim` rather than
        # the BERT-style names.
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)  # also exercise the mask-less path
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each example to one copy per choice: (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original had four anonymous `= True` flags; these names
    # match the upstream DistilBert test suite — confirm against the mixin.
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_distilbert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_distilbert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_distilbert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_distilbert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_distilbert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
74
0
"""Convert fairseq wav2vec 2.0 checkpoints to the Hugging Face format.

NOTE(review): in the original (obfuscated) source all seven functions were named
``snake_case`` (each shadowing the previous) and all parameters ``UpperCAmelCase__``;
the names below are reconstructed from the internal call sites
(``read_txt_into_dict``, ``set_recursively``, ``rename_dict``, ``load_wav2vec2_layer``,
``recursively_load_weights``, ``load_conv_layer``, ``convert_wav2vec2_checkpoint``).
"""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name fragment -> HF parameter name.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# HF parameter names that are NOT nested under the `wav2vec2.` prefix.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    """Read a label file into a {line_number: first_word} dict (used as id2label)."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` attribute-by-attribute into `hf_pointer` and copy `value` in.

    `weight_type` selects which leaf tensor (weight / weight_g / weight_v / bias /
    a PARAM_MAPPING "param") receives the data; shapes are validated first.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Like set_recursively, but records the renamed weight in a flat state dict."""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


# fairseq adapter parameter name -> HF adapter parameter name.
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    """Map one fairseq tensor into the HF model (or dict); return whether it matched."""
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                # Extract the encoder layer index from the fairseq name.
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor of `fairseq_model` into `hf_model`, logging leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor, validating its shape."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak a fairseq wav2vec2 checkpoint into a saved HF model.

    Depending on the flags this produces a sequence-classification head, a CTC
    head (fine-tuned), or a pretraining model.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
57
"""A platform-independent file lock (vendored py-filelock 3.0.12).

Provides an inter-process lock based on a lock file: `msvcrt.locking` on
Windows, `fcntl.flock` on Unix, and a create-exclusive "soft" lock as the
portable fallback.  `FileLock` is an alias for the best mechanism available
on the current platform.

Fixes applied to the previous revision:
- every module-level constant was bound to the single name ``lowercase_``,
  so ``__all__``, ``__version__``, ``_logger`` and ``FileLock`` clobbered
  each other and ``logger()``'s ``global _logger`` raised ``NameError``;
- several methods declared duplicate parameter names (``_A, _A`` — a
  ``SyntaxError``) while their bodies referenced the intended names
  (``timeout``, ``poll_intervall``, ``force``);
- all classes shared one name and inherited from an undefined base.
"""

import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    # Python 2 had no TimeoutError; fall back to its base class.
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Return the module logger, creating it lazily on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""

    def __init__(self, lock_file):
        # Path of the file lock that could not be acquired.
        self.lock_file = lock_file
        return None

    def __str__(self):
        return f"The file lock '{self.lock_file}' could not be acquired."


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by ``BaseFileLock.acquire``.

    Allows ``with lock.acquire(timeout=...):`` — exiting the ``with`` block
    releases the lock, while plain ``lock.acquire()`` without ``with`` keeps
    it held.
    """

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the common lock logic; subclasses supply _acquire/_release."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """
        Args:
            lock_file: path of the file used as the lock.
            timeout: default number of seconds to wait in ``acquire``;
                a negative value means "wait forever".
            max_filename_length: longest basename the filesystem accepts;
                longer names are hashed (defaults to 255).
        """
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long for the filesystem.
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for *_lock_file* as returned by os.open().
        # It is only NOT None while this object holds the lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # Guards the lock counter (thread-level re-entrancy).
        self._thread_lock = threading.Lock()
        # Nested-locking counter: incremented on acquire, the OS lock is
        # only released when it drops back to 0.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """Path of the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """Default timeout in seconds (negative = wait forever)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-specific acquisition; must set ``self._lock_file_fd``."""
        raise NotImplementedError()

    def _release(self):
        """Platform-specific release; must reset ``self._lock_file_fd``."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True while this object holds the OS-level lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Try to acquire the lock, polling every *poll_intervall* seconds.

        Raises:
            Timeout: if the lock could not be acquired within *timeout*
                seconds (only when *timeout* is non-negative).
        """
        if timeout is None:
            timeout = self.timeout

        # Increment the counter right away; undone below if anything fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something went wrong, so decrement the counter again.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Release the lock once the nesting counter reaches 0 (or *force*)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        # Make sure a garbage-collected lock never stays held.
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """Shorten *path*'s basename to *max_length* chars via a hash suffix."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the ``msvcrt.locking`` function to hard-lock the lock file."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # \\?\ prefix lifts the MAX_PATH limitation on Windows paths.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the ``fcntl.flock`` function to hard-lock the lock file."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # Respect the filesystem's real filename limit for this directory.
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile: https://github.com/benediktschmitt/py-filelock/issues/31
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file (portable fallback)."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


# Pick the best matching file lock for the platform.
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
74
0
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(_lowercase ): snake_case_ : int = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) snake_case_ : str = FlaxAutoModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(_lowercase ): snake_case_ : str = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) snake_case_ : Dict = FlaxAutoModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_lowercase ) snake_case_ : Optional[int] = FlaxBertModel.from_pretrained(_lowercase ) snake_case_ : int = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX ) @jax.jit def eval(**_lowercase ): return model(**_lowercase ) eval(**_lowercase ).block_until_ready() @slow def UpperCAmelCase__ ( self ) -> List[str]: '''simple 
docstring''' for model_name in ["roberta-base", "roberta-large"]: snake_case_ : Any = AutoTokenizer.from_pretrained(_lowercase ) snake_case_ : Tuple = FlaxRobertaModel.from_pretrained(_lowercase ) snake_case_ : Union[str, Any] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX ) @jax.jit def eval(**_lowercase ): return model(**_lowercase ) eval(**_lowercase ).block_until_ready() def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' with self.assertRaisesRegex( _lowercase , """bert-base is not a local folder and is not a valid model identifier""" ): snake_case_ : Optional[int] = FlaxAutoModel.from_pretrained("""bert-base""" ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' with self.assertRaisesRegex( _lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): snake_case_ : List[Any] = FlaxAutoModel.from_pretrained(_lowercase , revision="""aaaaaa""" ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' with self.assertRaisesRegex( _lowercase , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ): snake_case_ : int = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' with self.assertRaisesRegex(_lowercase , """Use `from_pt=True` to load this model""" ): snake_case_ : Dict = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
58
"""Feature extractor for MarkupLM: turns raw HTML strings into the lists of
text nodes and corresponding xpaths that the model consumes.

Fixes applied to the previous revision: the three helper methods were all
defined under the single name ``UpperCAmelCase__`` while call sites used
``self.xpath_soup`` / ``self.get_three_from_single`` / ``self.construct_xpath``
(AttributeError at runtime); ``construct_xpath`` declared duplicate ``_A``
parameters (a SyntaxError); and the mangled ``bsa``/``is_bsa_available`` names
are restored to ``bs4``/``is_bs4_available``.
"""

import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts nodes and xpaths from HTML for MarkupLM (requires bs4)."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk up *element*'s ancestors and collect (tag, subscript) pairs.

        Subscript 0 means the tag is its parent's only child with that name;
        otherwise it is the element's 1-based position among same-named
        siblings.
        """
        xpath_tags = []
        xpath_subscripts = []
        # NavigableStrings have no name; start from the enclosing tag.
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        # Collected bottom-up; xpaths read top-down.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (doc strings, tag sequences, subscript sequences)."""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Render tag/subscript sequences as an xpath string, e.g. /html/body/div[2]."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Featurize one HTML string or a batch of them.

        Returns:
            BatchFeature with ``nodes`` (text strings per example) and
            ``xpaths`` (one xpath string per node).

        Raises:
            ValueError: if *html_strings* is not a str or list/tuple of str.
        """
        valid_strings = False

        # Check that strings has a valid type.
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths.
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
74
0
"""Fine-tune a Transformers model (e.g. Wav2Vec2) for audio classification.

Fixes applied to the previous revision: the entry point was defined as
``lowerCAmelCase_`` while ``__main__`` called the undefined ``main()``;
``train_transforms`` called the undefined ``random_subsample`` (also named
``lowerCAmelCase_``); and every dataclass attribute was an un-annotated
assignment to the single name ``lowercase_``, so the argument dataclasses
exposed no fields at all to ``HfArgumentParser``.  Mangled identifiers
(``fpaa`` -> ``fp16``, ``labelaid``/``idalabel`` -> ``label2id``/``id2label``)
are restored.
"""

import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample a chunk of at most `max_length` seconds from `wav`."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature-extractor we fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # `freeze_feature_extractor` is the deprecated alias of
        # `freeze_feature_encoder`; accept it but warn, and reject
        # contradictory combinations.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch (random subsampling + featurization)."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch (full clips, no subsampling)."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Compute accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
59
# coding=utf-8
"""Convert LeViT checkpoints from the timm library to the Hugging Face format."""

import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes: int, name: str, config, save_directory: Path, push_to_hub: bool = True):
    """Load one pretrained timm LeViT model, copy its weights into the HF
    architecture, verify the logits match, and optionally save the result.

    Args:
        hidden_sizes: first-stage hidden size, used to pick the timm checkpoint.
        name: HF checkpoint name, e.g. ``"levit-128S"`` (trailing ``S`` selects
            the small 128 variant).
        config: a ``LevitConfig`` for the target architecture.
        save_directory: directory the converted checkpoint is written under.
        push_to_hub: when True, save model and image processor locally
            (and announce the push).
    """
    print(f"Converting {name}...")

    with torch.no_grad():
        # Pick the timm checkpoint that matches the requested width.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()

        # Both state dicts enumerate tensors in the same order, so map key i -> key i.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: the converted model must reproduce the original logits.
        x = torch.randn((2, 3, 224, 224))
        from_model_logits = from_model(x)
        our_model_logits = our_model(x).logits

    assert torch.allclose(from_model_logits, our_model_logits), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named LeViT variant, or all known variants when
    ``model_name`` is None.

    Returns:
        ``(config, expected_shape)`` for the last converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # JSON keys are strings; the config wants integer class ids.
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # All variants share the ImageNet label maps; only the architecture differs.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 6, 8],
            depths=[2, 3, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 8, 12],
            depths=[4, 4, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384],
            num_attention_heads=[3, 5, 6],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512],
            num_attention_heads=[4, 6, 8],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768],
            num_attention_heads=[6, 9, 12],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0.1,
        ),
    }

    if model_name:
        # Bind config so the return statement below is valid for this branch too.
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
74
0
from typing import Union import fire import torch from tqdm import tqdm def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = "cpu" , _UpperCamelCase = None ) -> None: """simple docstring""" snake_case_ : int = torch.load(_UpperCamelCase , map_location=_UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(_UpperCamelCase , torch.Tensor ): raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' ) snake_case_ : Optional[Any] = v.half() if save_path is None: # overwrite src_path snake_case_ : Any = src_path torch.save(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
60
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
0
import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def _A ( lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] ): """simple docstring""" lowerCAmelCase__ = RemBertConfig.from_json_file(lowerCAmelCase_ ) print("Building PyTorch model from configuration: {}".format(str(lowerCAmelCase_ ) ) ) lowerCAmelCase__ = RemBertModel(lowerCAmelCase_ ) # Load weights from tf checkpoint load_tf_weights_in_rembert(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Save pytorch-model print("Save PyTorch model to {}".format(lowerCAmelCase_ ) ) torch.save(model.state_dict() , lowerCAmelCase_ ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--rembert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained RemBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCamelCase = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
61
"""Collect selected warning categories from CI artifacts (zip files or a
GitHub-Actions download directory) and dump them to JSON."""

import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract target warnings from one artifact.

    Args:
        artifact_path: a ``.zip`` artifact, or (when the module-level ``from_gh``
            flag is set) a directory containing a ``warnings.txt``.
        targets: warning class names to keep, e.g. ``["DeprecationWarning"]``.

    Returns:
        A set of multi-line warning strings.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        # Artifacts were already unpacked by `actions/download-artifact`.
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            # Best-effort: a corrupt artifact should not abort the whole run.
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract target warnings from all artifacts found in ``artifact_dir``."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
74
0
# Lazy-import module definition for the data2vec (audio / text / vision) models.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: PyTorch modeling classes are simply not exposed.
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    # Only the vision model has a TensorFlow port.
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they stay lazy.
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
"""A spatio-temporal transformer that attends over the frame (time) axis of a
video latent batch."""

from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of `TransformerTemporalModel`.

    Args:
        sample: hidden states of shape `(batch_frames, channel, height, width)`.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Transformer block applied along the temporal (num_frames) dimension.

    The input `(batch * frames, C, H, W)` is reshaped so each spatial location
    becomes a sequence of `num_frames` tokens, attended over, then reshaped back
    and added to the residual.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run temporal attention.

        Args:
            hidden_states: input of shape `(batch_frames, channel, height, width)`
                where `batch_frames = batch_size * num_frames`.
            return_dict: when False, return a plain `(sample,)` tuple.
        """
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        # (B*F, C, H, W) -> (B, F, C, H, W) -> (B, C, F, H, W)
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        # Each spatial position becomes one sequence of `num_frames` tokens.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
74
0
"""Fast and slow-integration tests for the Kandinsky image-to-image pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # Attributes consumed by PipelineTesterMixin to drive the common test suite.
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            # MPS has no per-device generator; fall back to the global one.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
63
"""Check that code marked `# Copied from diffusers.<path>` stays in sync with its original.

Run from the root of the repo:
    python utils/check_copies.py              # report inconsistencies
    python utils/check_copies.py --fix_and_overwrite   # rewrite the copies in place
"""
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo.
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    """Return True while `line` still belongs to a body indented at `indent`.

    Blank lines and a closing `)` of a multi-line signature also count as part
    of the body.
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Return the source code of `object_name` (a dotted path such as
    `models.unet_2d.UNet2DModel.forward`) as found inside the diffusers repo.

    Raises:
        ValueError: if the dotted path does not start at a module of diffusers,
            or the object cannot be located in that module.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        # Each nesting level adds one indentation step.
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


# Matches `# Copied from diffusers.x.y` comments, capturing indent, object path and options.
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
# Matches `old->new` replacement directives in the copy options.
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Reformat `code` with black, preserving its original (possibly nested) indentation.

    Indented snippets are temporarily wrapped in a dummy class so black accepts them.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """Check that every `# Copied from` block in `filename` matches its original.

    Args:
        filename: path of the file to check.
        overwrite: if True, rewrite out-of-sync copies in place.

    Returns:
        A list of `[object_name, start_index]` for every inconsistent copy found.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        # The copied body starts right after the comment (skip one extra line for a decorator).
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    # Also replace the lower- and upper-cased variants of the pattern.
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite=False):
    """Check all `# Copied from` statements in the diffusers source tree.

    Raises:
        Exception: if inconsistencies are found and `overwrite` is False.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
74
0
import inspect import unittest class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Any: try: import diffusers # noqa: F401 except ImportError: assert False def UpperCamelCase_ ( self ) -> List[str]: import diffusers from diffusers.dependency_versions_table import deps SCREAMING_SNAKE_CASE__: Tuple= inspect.getmembers(lowerCAmelCase , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": SCREAMING_SNAKE_CASE__: Optional[int]= '''k-diffusion''' elif backend == "invisible_watermark": SCREAMING_SNAKE_CASE__: int= '''invisible-watermark''' assert backend in deps, f'{backend} is not in the deps table!'
64
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class __UpperCamelCase(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline."""

    def tearDown(self):
        """Free accelerator memory between tests."""
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """End-to-end generation with the default scheduler; pins an image slice."""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        # One sample per device so the pmapped pipeline call shards evenly.
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        # Collapse the (device, batch) leading axes before slicing.
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """Same generation with the DPM-Solver++ multistep scheduler swapped in."""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # The pipeline params must carry the scheduler state explicitly.
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
74
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { 'configuration_bridgetower': [ 'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig', ], 'processing_bridgetower': ['BridgeTowerProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST', 'BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
65
"""Lazy import structure for the LayoutLMv2 model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Objects that are importable regardless of which backends are installed.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

# The fast tokenizer requires the `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

# Feature extractor / image processor require the vision backend.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

# The modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
74
0
"""Convert an OpenAI consistency-model checkpoint (unet.pt) into a diffusers
`ConsistencyModelPipeline` (UNet2DModel + CMStochasticIterativeScheduler)."""
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


# UNet config used by the tiny test checkpoints.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet config for the class-conditional ImageNet-64 checkpoints.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet config for the unconditional LSUN-256 (bedroom / cat) checkpoints.
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler configs: consistency-distillation vs consistency-training variants.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    """Parse a command-line string into a bool (accepts yes/no, true/false, 1/0...).

    Raises:
        argparse.ArgumentTypeError: if the string is not a recognized boolean.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Map one original ResNet block's weights onto diffusers naming.

    Args:
        checkpoint: source state dict.
        new_checkpoint: destination state dict (mutated and returned).
        old_prefix / new_prefix: key prefixes in source / destination.
        has_skip: whether the block has a skip (shortcut) convolution.
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Map one original attention block's weights onto diffusers naming.

    The original stores q/k/v as a single fused 1x1-conv `qkv`; diffusers uses
    separate linear `to_q`/`to_k`/`to_v`, hence the chunk(3) and squeezes.
    """
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    # Squeeze the trailing 1x1 conv dims so the weights fit linear layers.
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Convert a consistency-model `unet.pt` state dict into diffusers UNet2DModel keys.

    Args:
        checkpoint_path: path to the original `.pt` checkpoint.
        unet_config: the diffusers UNet config dict matching the checkpoint.

    Returns:
        A state dict loadable into `UNet2DModel(**unet_config)`.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    # input_blocks.0 is conv_in, so real blocks start at 1.
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # A channel change means the first resnet of the block carries a skip conv.
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        # Every down block except the last ends with a downsampler.
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            # Up blocks have one extra resnet, and each resnet has a skip conv.
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()

    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config from the checkpoint file name.
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config (distillation vs training, per dataset).
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
66
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for MobileBERT (slow Python and fast Rust implementations)."""

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        """Write a minimal WordPiece vocab to disk and point the mixin's tokenizer list at it."""
        super().setUp()

        # Minimal vocab sufficient for the tokenization cases exercised below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        """Return a (raw, expected-decoded) text pair for the common mixin tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # strip_accents left at its default: lowercasing implies accent stripping.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 101/102 are the [CLS]/[SEP] ids of the pretrained vocab.
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
74
0
import argparse
import json
from collections import OrderedDict


def embeddings(idx):
    """Rename map for the patch-embedding weights of stage `idx`.

    Returns a list of (huggingface_name, original_name) tuples.
    """
    embed = []
    for hf_suffix, orig_suffix in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_suffix}",
                f"stage{idx}.patch_embed.{orig_suffix}",
            )
        )
    return embed


def attention(idx, cnt):
    """Rename map for transformer block `cnt` of stage `idx` (34 tensors).

    Covers the convolutional q/k/v projections (conv + batch-norm statistics),
    the linear q/k/v projections, the attention output projection, the MLP and
    both layer norms. Returns a list of (huggingface_name, original_name) tuples.
    """
    attention_weights = []
    hf_block = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_block = f"stage{idx}.blocks.{cnt}"

    # Depth-wise convolutional projections: one conv weight + 5 batch-norm tensors each.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv = f"{hf_block}.attention.attention.convolution_projection_{proj}.convolution_projection"
        orig_proj = f"{orig_block}.attn.conv_proj_{short}"
        attention_weights.append((f"{conv}.convolution.weight", f"{orig_proj}.conv.weight"))
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"{conv}.normalization.{stat}", f"{orig_proj}.bn.{stat}"))

    # Linear q/k/v projections.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_block}.attention.attention.projection_{proj}.{param}", f"{orig_block}.attn.proj_{short}.{param}")
            )

    # Attention output projection, MLP and layer norms.
    for hf_suffix, orig_suffix in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_block}.{hf_suffix}.{param}", f"{orig_block}.{orig_suffix}.{param}"))
    return attention_weights


def cls_token(idx):
    """Rename map for the cls token of stage `idx`.

    NOTE(review): the original name is hard-coded to "stage2.cls_token";
    presumably only the last stage carries a cls token — confirm for new configs.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    """Rename map for the final layer norm and classification head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into a `CvtForImageClassification` checkpoint.

    Args:
        cvt_model: model name, e.g. "cvt-w24" — the "13"/"21" infix selects the depth config.
        image_size: input image size for the saved image processor.
        cvt_file_name: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory for model + image processor.
    """
    # Heavy dependencies are imported lazily so the pure rename-map helpers
    # above stay importable without torch/transformers installed.
    import torch
    from huggingface_hub import cached_download, hf_hub_url
    from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification

    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for entry in list_of_state_dict:
        print(entry)
    for hf_name, orig_name in list_of_state_dict:
        huggingface_weights[hf_name] = original_weights[orig_name]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
67
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias for :class:`MobileViTImageProcessor`.

    Kept for backward compatibility; emits a ``FutureWarning`` on instantiation
    and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
74
0
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    """Fast CPU-friendly checks for the ScoreSdeVe pipeline with a tiny UNet."""

    @property
    def dummy_uncond_unet(self):
        """A small unconditional UNet with a fixed seed for reproducible weights."""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        """Pipeline output must match between `return_dict=True` and tuple output."""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the pretrained ncsnpp-church-256 checkpoint."""

    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
68
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    # Number of rows per Arrow record batch yielded by `_generate_tables`.
    batch_size: int = 10_000
    # Optional column projection; must match the declared features when both are set.
    columns: Optional[List[str]] = None
    # Explicit features; when None they are inferred from the first file's schema.
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    """Arrow-based builder that streams tables out of parquet files."""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        """Expose the (possibly None) user-provided features."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield `(key, pa.Table)` pairs, one table per record batch per file."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # A column projection that disagrees with the declared features is a user error.
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
74
0
"""Convert FocalNet checkpoints from the original repository to the Transformers format."""

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    """Build a FocalNetConfig (including ImageNet label maps) from the checkpoint name."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    """Map an original FocalNet state-dict key to its Transformers equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        # Everything that is not the classification head lives under the backbone prefix.
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original FocalNet checkpoint, convert it, verify it on a COCO image
    and optionally save it to disk and/or push it to the hub."""
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    # Reference preprocessing pipeline used to cross-check the processor output.
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
69
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Given a point on the ellipse 4x^2 + y^2 = 100 and the gradient of the
    incoming beam, return ``(next_x, next_y, outgoing_gradient)`` describing the
    next reflection.

    Raises ZeroDivisionError when ``point_x`` is 0 (vertical normal).
    """
    # Slope of the normal at (x, y): the tangent slope is -4x/y, so the normal is y/(4x).
    normal_gradient = point_y / 4 / point_x
    # Double-angle sine/cosine of the normal's inclination, used to reflect the beam.
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Project Euler 144: count how many times a laser beam entering the white
    cell at (0, 10.1) and first hitting (1.4, -9.6) reflects inside the
    ellipse 4x^2 + y^2 = 100 before exiting through the top gap
    (-0.01 <= x <= 0.01, y > 0)."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # Gradient of the incoming beam from the entry point (0.0, 10.1).
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
74
0
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c_i * x**i) for coefficients ``poly`` (lowest degree first).

    Naive reference implementation; `horner` below is the O(n) equivalent.

    >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method.

    >>> horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    result = 0.0
    # Fold from the highest coefficient down: r = (...((c_n)x + c_{n-1})x + ...) + c_0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
70
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    """Holds the processor configuration and input sizes used by the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # kwargs override the serialized values
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
74
0
"""Lazy-import init for the ByT5 tokenizer module."""

from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
def z_function(input_str: str) -> list[int]:
    """Return the Z-array of ``input_str``: for each i > 0, z_result[i] is the
    length of the longest substring starting at i that is also a prefix of the
    string (z_result[0] is left at 0).

    >>> z_function("abracadabra")
    [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if we can extend the match at position ``i`` by one more character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` using the Z-array.

    >>> find_pattern("abr", "abracadabra")
    2
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
74
0
"""EnCodec model configuration."""

import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    """Configuration for the EnCodec neural audio codec.

    Stores the encoder/decoder architecture hyper-parameters (filters, kernel
    sizes, LSTM layers, quantizer codebook) together with the audio framing
    options (sampling rate, chunking, overlap).
    """

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # The quantizer codebook defaults to the model's hidden size.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        # Number of latent frames per second: sampling rate divided by the total hop length.
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
72
"""Lazy-import init for the Swin Transformer model family (PyTorch and TensorFlow)."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Maps submodule name -> public names it exports; extended below per available backend.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
74
0
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Return the length of the circular arc subtended by ``angle`` degrees on a
    circle of the given ``radius``.

    >>> arc_length(90, 10)
    15.707963267948966
    """
    # Full circumference (2*pi*r) scaled by the swept fraction of 360 degrees.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
73
"""Convert X-CLIP checkpoints from the original repository to the 🤗 Transformers format."""

import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def get_xclip_config(model_name, num_frames):
    """Build an ``XCLIPConfig`` for ``model_name``.

    The patch size is parsed out of the model name (the two digits right
    after the literal substring ``"patch"``); "large" variants get the
    larger text/vision tower dimensions.
    """
    text_config = XCLIPTextConfig()

    # derive patch size from model name, e.g. "xclip-base-patch32" -> 32
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        # This checkpoint was trained at the higher 336px resolution.
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config


def rename_key(name):
    """Map an original-checkpoint parameter name to its HF-Transformers name."""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    # message-attention projections keep their own name; only plain attention
    # becomes self_attn.
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit (multi-frame integration transformer)
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys of ``orig_state_dict`` and split fused qkv projections.

    The original checkpoints store attention as one fused ``attn.in_proj``
    tensor; HF models expect separate q/k/v projections, so the tensor is
    sliced into thirds along its first dimension.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            # HF stores the projection layers transposed relative to the original.
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict


def prepare_video(num_frames):
    """Download and return the "eating spaghetti" test video with ``num_frames`` frames."""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)


def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert one original X-CLIP checkpoint to HF format and verify its outputs.

    Args:
        model_name: One of the known checkpoint names (keys of ``model_to_url``).
        pytorch_dump_folder_path: If given, where to ``save_pretrained`` the model.
        push_to_hub: If True, push model/processor/slow tokenizer to the hub.

    Raises:
        ValueError: If ``model_name`` has no expected-output reference.
        AssertionError: If the converted model's probabilities don't match.
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        # Google Drive checkpoints can't be fetched by torch.hub; use gdown.
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # Only the (buffer) position_ids are expected to be missing.
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs against reference probabilities for each checkpoint.
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
0
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase_ : def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : str = image_size UpperCAmelCase__ : List[str] = patch_size UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : List[str] = use_input_mask UpperCAmelCase__ : Tuple = use_token_type_ids 
UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Any = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : List[str] = coordinate_size UpperCAmelCase__ : Tuple = shape_size UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Optional[Any] = num_choices UpperCAmelCase__ : Union[str, Any] = scope UpperCAmelCase__ : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase__ : str = text_seq_length UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1 UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCAmelCase__ : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase__ : str = bbox[i, j, 3] UpperCAmelCase__ : Dict = bbox[i, j, 1] UpperCAmelCase__ : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase__ : Optional[int] = bbox[i, j, 2] UpperCAmelCase__ : Any = bbox[i, j, 0] UpperCAmelCase__ : List[Any] = tmp_coordinate UpperCAmelCase__ : str = tf.constant(_A ) UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 
self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Any = None if self.use_input_mask: UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase__ : Any = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : List[str] = None if self.use_labels: UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A ) # text + image UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A ) UpperCAmelCase__ : Tuple = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , ) UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) # text only UpperCAmelCase__ : Any = model(_A , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.num_labels UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A ) UpperCAmelCase__ : Union[str, Any] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.num_labels UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A ) UpperCAmelCase__ : Optional[int] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : str = 2 UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A ) UpperCAmelCase__ : str = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = self.prepare_config_and_inputs() ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs UpperCAmelCase__ : List[Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase__ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ): '''simple docstring''' return True def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(_A ) if model_class in get_values(_A ): UpperCAmelCase__ : Tuple = { k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(_A , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
elif model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self ) UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = model_class(_A ) if getattr(_A , '''hf_compute_loss''' , _A ): # The number of elements in the loss should be the same as the number of elements in the label UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : List[Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0] ] UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) UpperCAmelCase__ : List[Any] = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy() if 
len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCAmelCase__ : Any = -100 UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A ) UpperCAmelCase__ : int = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Dict = model(_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) # Get keys that were added with the _prepare_for_class function UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys() UpperCAmelCase__ : int = inspect.signature(model.call ).parameters UpperCAmelCase__ : Union[str, Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCAmelCase__ : Dict = {0: '''input_ids'''} for label_key in label_keys: UpperCAmelCase__ : str = signature_names.index(_A ) UpperCAmelCase__ : List[Any] = label_key UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCAmelCase__ : Tuple = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCAmelCase__ : Any = prepared_for_class[value] UpperCAmelCase__ : Tuple = tuple(_A ) # Send to model UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase_ ( self : int ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ 
) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Union[str, Any] = type self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : List[str] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Any ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( _A , _A , _A , _A , _A , _A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple 
docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> List[str]: UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Dict ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) UpperCAmelCase__ : Dict = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values UpperCAmelCase__ : str = tf.constant([[1, 2]] ) UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A ) # verify the logits UpperCAmelCase__ : Optional[int] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , _A ) UpperCAmelCase__ : Dict = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
75
"""Copy the first *n* lines of every file in a source directory into a
destination directory — a helper for building truncated dataset fixtures.

Usage: python minify_dataset.py SRC_DIR DEST_DIR N
"""
from pathlib import Path


def minify(src_dir, dest_dir, n):
    """Write the first ``n`` lines of each file in ``src_dir`` to ``dest_dir``.

    Args:
        src_dir: directory whose (flat) files are read.
        dest_dir: output directory; created if it does not already exist.
        n: number of leading lines to keep from each file.
    """
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    # exist_ok=True: re-running over an existing output directory is fine.
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        # Keep only the first n lines, stripping trailing whitespace/newlines.
        new_lines = [line.rstrip() for line in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new_lines))


if __name__ == "__main__":
    # Imported lazily so the module can be imported (and tested) without `fire`.
    import fire

    fire.Fire(minify)
74
0
"""TensorFlow activation functions shared by the TF model implementations.

Exposes ``ACT2FN``, a string -> callable mapping, and ``get_tf_activation``
to resolve an activation by name.
"""
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Exact Gaussian Error Linear Unit: x * Phi(x), via the erf function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the "new" variant used by GPT-2)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Fast tanh-based GELU approximation with precomputed constants."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044_715, x.dtype)
    coeff2 = tf.cast(0.7_978_845_608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """Exact GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split ``x`` in two halves along ``axis`` and gate
    the first half with the sigmoid of the second."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    # TF >= 2.4 ships a native gelu with an `approximate` switch; prefer it.
    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Resolve an activation function by name from ``ACT2FN``.

    Raises:
        KeyError: if ``activation_string`` is not a known activation.
    """
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys() )}""")
76
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    """Unit tests for DisjunctiveConstraint: input validation and the
    stepped/completed/reset state machine exposed by `update()`."""

    def test_input_types(self):
        # token_ids must be a plain nested python list of ints; tensors and
        # lists of tensors are rejected at construction time.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One sequence being a strict prefix of another is ambiguous and
        # therefore disallowed.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        # After reset, progress starts over from the root of the trie.
        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
74
0
"""Tests for the MarkupLM feature extractor (HTML -> nodes + xpaths)."""
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    """Minimal tester object: MarkupLMFeatureExtractor takes no init kwargs,
    so the feat-extract dict is empty."""

    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    """Return the two HTML fixtures used by the tests below."""
    html_string_1 = "<HTML>\n\n    <HEAD>\n    <TITLE>sample document</TITLE>\n    </HEAD>\n\n    <BODY BGCOLOR=\"FFFFFF\">\n    <HR>\n    <a href=\"http://google.com\">Goog</a>\n    <H1>This is one header</H1>\n    <H2>This is a another Header</H2>\n    <P>Travel from\n    <P>\n    <B>SFO to JFK</B>\n    <BR>\n    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n    <HR>\n    <div style=\"color:#0000FF\">\n    <h3>Traveler <b> name </b> is\n    <p> John Doe </p>\n    </div>"

    html_string_2 = "\n    <!DOCTYPE html>\n    <html>\n    <body>\n\n    <h1>My First Heading</h1>\n    <p>My first paragraph.</p>\n\n    </body>\n    </html>\n    "

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # fmt: on
77
"""Pre-train a model on masked image modeling (SimMIM-style), using the
HuggingFace Trainer. Arguments come from the command line or a JSON file."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and evaluation."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect the optional train/val folders into the mapping expected by
        # datasets.load_dataset; None means "use the hub dataset splits".
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going
    to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generate random boolean patch masks for SimMIM-style pretraining.

    A mask is a flat tensor over model patches where 1 means "masked". The
    random choice is made at mask-patch granularity, then upsampled to the
    (finer) model-patch grid.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        # Side length of the coarse mask grid, and the upsampling factor from
        # mask patches to model patches.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick mask_count distinct coarse positions uniformly at random.
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        # Upsample the coarse grid to model-patch resolution.
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    """Stack per-example pixel values and masks into batch tensors."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config: CLI values win over the checkpoint configuration.
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Apply image transforms and generate a fresh random mask per image."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
74
0
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE_: Optional[Any] =get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Union[str, Any] = DebertaVaTokenizer a__ : Any = DebertaVaTokenizerFast a__ : Union[str, Any] = True a__ : Tuple = True def _lowercase (self : Dict ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ = DebertaVaTokenizer(__a , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase (self : int , __a : Optional[Any] ): UpperCAmelCase_ = "this is a test" UpperCAmelCase_ = "this is a test" return input_text, output_text def _lowercase (self : Tuple ): UpperCAmelCase_ = "<pad>" UpperCAmelCase_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def _lowercase (self : Dict ): UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(__a ) , 30001 ) def _lowercase (self : List[str] ): self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase (self : Tuple ): # fmt: off UpperCAmelCase_ = " \tHeLLo!how \n Are yoU? 
" UpperCAmelCase_ = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def _lowercase (self : str ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def _lowercase (self : Any ): pass def _lowercase (self : List[Any] ): # fmt: off UpperCAmelCase_ = "I was born in 92000, and this is falsé." UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ = DebertaVaTokenizer(__a , split_by_punct=__a ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = DebertaVaTokenizerFast(__a , split_by_punct=__a ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) def _lowercase (self : Dict ): # fmt: off UpperCAmelCase_ = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) def _lowercase (self : Any ): # fmt: off UpperCAmelCase_ = "I was born in 92000, and this is falsé." UpperCAmelCase_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) def _lowercase (self : Optional[Any] ): # fmt: off UpperCAmelCase_ = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) def _lowercase (self : int ): # fmt: off UpperCAmelCase_ = " \tHeLLo!how \n Are yoU? " UpperCAmelCase_ = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_rust_tokenizer() UpperCAmelCase_ = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a ) UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = self.get_rust_tokenizer() UpperCAmelCase_ = tokenizer.encode(__a ) UpperCAmelCase_ = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = "This is a test" UpperCAmelCase_ = [13, 1, 4398, 25, 21, 1289] UpperCAmelCase_ = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ = DebertaVaTokenizer(__a , keep_accents=__a ) UpperCAmelCase_ = DebertaVaTokenizerFast(__a , keep_accents=__a ) UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual(__a , __a ) # fmt: off UpperCAmelCase_ = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] UpperCAmelCase_ = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual(__a , __a ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = DebertaVaTokenizer(__a ) UpperCAmelCase_ = tokenizer.encode("sequence builders" ) UpperCAmelCase_ = tokenizer.encode("multi-sequence build" ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __a ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __a , ) @slow def _lowercase (self : Union[str, Any] ): # fmt: off UpperCAmelCase_ = {"input_ids": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 
667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
78
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """
    Configuration class holding all hyperparameters of a Data2Vec vision model.

    Instantiating a configuration with the defaults yields a configuration similar to the
    ``facebook/data2vec-vision-base-ft`` architecture. Unknown keyword arguments are forwarded to
    :class:`PretrainedConfig`.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        # NOTE: the original (scrambled) signature named every parameter `_A`, which is a
        # SyntaxError (duplicate argument names); names below are restored from the attribute
        # assignments in the body.
        super().__init__(**kwargs)

        # Transformer encoder hyperparameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Image / patch embedding hyperparameters.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec vision models."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel-values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX outputs against the PyTorch reference.
        return 1e-4
74
0
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    # NOTE: the scrambled source imported `fa_score`, which does not exist in sklearn;
    # the real name is `f1_score`.
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels (element-wise mean of the equality mask)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    """Return accuracy, binary F1, and their average as a dict."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlations and their average as a dict."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    """
    Compute the canonical metric(s) for a GLUE task.

    Raises:
        KeyError: if ``task_name`` is not a known GLUE task.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(
        labels
    ), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    """
    Compute accuracy for the XNLI task.

    Raises:
        ValueError: if ``preds`` and ``labels`` differ in length.
        KeyError: if ``task_name`` is not ``"xnli"``.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
79
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester(object):
    """Builds small DistilBert configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the scrambled source declared every parameter as `_A` (duplicate names are a
        # SyntaxError); names restored from the attribute assignments below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/mask/label tensors plus a config for the tests below."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # DistilBert uses its own hyperparameter names (dim, n_layers, ...).
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each example once per choice: (batch, seq) -> (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntergrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        # Compare a small interior slice against reference activations.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
74
0
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU-sized tests for the StableUnCLIP text-to-image pipeline."""

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build a full set of tiny pipeline components, each deterministically seeded."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embedding input concatenates mean and variance -> 2 * projection dim.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic minimal call kwargs for the pipeline."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Exact-value comparison is only reliable on CPU.
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real fusing/stable-unclip-2-1-l checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offload(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
80
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. 
__SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = 
os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
74
0
import torch


def lowerCAmelCase_ ():
    """Report how many CUDA GPUs are visible to this process.

    Prints ``Successfully ran on {N} GPUs`` where N comes from
    ``torch.cuda.device_count()`` when CUDA is available, otherwise 0.
    Returns ``None``.
    """
    if torch.cuda.is_available():
        # fix: the count must be bound to `num_gpus` — it was previously
        # assigned to a throwaway name, making the print below a NameError.
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'Successfully ran on {num_gpus} GPUs' )


if __name__ == "__main__":
    # fix: the guard previously called `main()`, which is not defined here.
    lowerCAmelCase_()
81
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return 
all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} __SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
74
0
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py lowerCamelCase = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ lowerCamelCase = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. 
Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ lowerCamelCase = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): '''simple docstring''' def lowercase__ ( self : List[str] ) -> List[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
82
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , 
drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
74
0
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : int = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=A_ ) _lowerCamelCase : Union[str, Any] = parser.add_subparsers(help='''accelerate command helpers''' ) # Register commands get_config_parser(subparsers=A_ ) env_command_parser(subparsers=A_ ) launch_command_parser(subparsers=A_ ) tpu_command_parser(subparsers=A_ ) test_command_parser(subparsers=A_ ) # Let's go _lowerCamelCase : str = parser.parse_args() if not hasattr(A_, '''func''' ): parser.print_help() exit(1 ) # Run args.func(A_ ) if __name__ == "__main__": main()
83
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Lazy-import structure: maps each submodule name to the public names it
# exports.
# fix: this dict was previously bound to a throwaway name while the
# `_LazyModule(...)` call at the bottom referenced the undefined
# `_import_structure`.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: simply omit the modeling exports when it is absent.
    pass
else:
    # fix: the model export list was previously assigned to a throwaway name
    # instead of being registered under the "modeling_falcon" submodule.
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so that submodules
    # are only imported on first attribute access.
    # fix: the proxy was previously bound to a throwaway variable and never
    # installed in sys.modules, so lazy attribute lookup could not work.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
74
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase = ''' Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)["depth"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline("depth-estimation") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to("cuda") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> img = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... 
).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") >>> prompt = "A robot, 4k photo" >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" >>> generator = torch.Generator(device="cuda").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save("robot_cat.png") ``` ''' def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=8 ): lowercase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowercase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class A_ ( __lowerCamelCase ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case , ): super().__init__() self.register_modules( unet=snake_case , scheduler=snake_case , movq=snake_case , ) lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): if latents is None: lowercase = randn_tensor(snake_case , generator=snake_case , device=snake_case , dtype=snake_case ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) lowercase = latents.to(snake_case ) lowercase = latents * scheduler.init_noise_sigma return latents def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) lowercase = torch.device(F'''cuda:{gpu_id}''' ) lowercase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ): if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' 
) lowercase = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=snake_case ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowercase = None for cpu_offloaded_model in [self.unet, self.movq]: lowercase , lowercase = cpu_offload_with_hook(snake_case , snake_case , prev_module_hook=snake_case ) # We'll offload the last model manually. lowercase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__ ( self ): if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(snake_case , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(snake_case ) def __call__( self , snake_case , snake_case , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 100 , snake_case = 4.0 , snake_case = 1 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , ): lowercase = self._execution_device lowercase = guidance_scale > 1.0 if isinstance(snake_case , snake_case ): lowercase = torch.cat(snake_case , dim=0 ) if isinstance(snake_case , snake_case ): lowercase = torch.cat(snake_case , dim=0 ) if isinstance(snake_case , snake_case ): lowercase = torch.cat(snake_case , dim=0 ) lowercase = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: lowercase = image_embeds.repeat_interleave(snake_case , dim=0 ) lowercase = negative_image_embeds.repeat_interleave(snake_case , dim=0 ) lowercase = hint.repeat_interleave(snake_case , dim=0 ) lowercase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case ) lowercase = torch.cat([hint, hint] , dim=0 
).to(dtype=self.unet.dtype , device=snake_case ) self.scheduler.set_timesteps(snake_case , device=snake_case ) lowercase = self.scheduler.timesteps lowercase = self.movq.config.latent_channels lowercase , lowercase = downscale_height_and_width(snake_case , snake_case , self.movq_scale_factor ) # create initial latent lowercase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , snake_case , snake_case , snake_case , self.scheduler , ) for i, t in enumerate(self.progress_bar(snake_case ) ): # expand the latents if we are doing classifier free guidance lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase = {'image_embeds': image_embeds, 'hint': hint} lowercase = self.unet( sample=snake_case , timestep=snake_case , encoder_hidden_states=snake_case , added_cond_kwargs=snake_case , return_dict=snake_case , )[0] if do_classifier_free_guidance: lowercase , lowercase = noise_pred.split(latents.shape[1] , dim=1 ) lowercase , lowercase = noise_pred.chunk(2 ) lowercase , lowercase = variance_pred.chunk(2 ) lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowercase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowercase , lowercase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowercase = self.scheduler.step( snake_case , snake_case , snake_case , generator=snake_case , )[0] # post-processing lowercase = self.movq.decode(snake_case , force_not_quantize=snake_case )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: lowercase = image * 0.5 + 0.5 lowercase = image.clamp(0 , 1 ) lowercase = image.cpu().permute(0 , 2 , 3 , 1 
).float().numpy() if output_type == "pil": lowercase = self.numpy_to_pil(snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case )
84
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format).

    Args:
        artifact_path: path to a downloaded artifact zip, or (with `--from_gh`)
            a directory produced by `actions/download-artifact`.
        targets: iterable of warning class names (e.g. ``DeprecationWarning``) to keep.

    Returns:
        A set of warning texts whose warning type is one of `targets`.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Warnings in a pytest log are indented blocks; accumulate indented lines
        # into `buffer` and flush the block when the indentation ends.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # NOTE(review): `from_gh` is read as a module-level global set in __main__ below.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            # Best-effort: a corrupt artifact should not abort the whole run.
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files found under `artifact_dir`."""
    selected_warnings = set()
    # With `--from_gh`, artifacts are plain directories instead of zip files.
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        # argparse `type=` helper: parse a comma-separated value into a list.
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()
    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)

    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
74
0
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


# NOTE(review): the original identifiers in this copy were destroyed (all parameters
# were named `a_`, which is a SyntaxError, and assignment targets were replaced by
# `SCREAMING_SNAKE_CASE__` while later code reads `text_encoding` etc.). The names
# below are reconstructed from the call sites and the processor attribute
# declarations (`BlipImageProcessor` + `AutoTokenizer`) — confirm against upstream.
class Blip2Processor(ProcessorMixin):
    """Wraps a BLIP image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer) -> None:
        # This model does not use token_type_ids, so make the tokenizer skip them.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare text and/or images for the model.

        Forwards `images` to the image processor and `text` to the tokenizer;
        at least one of the two must be provided.

        Raises:
            ValueError: if both `images` and `text` are None.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        # Merge the token encodings into the image encoding (keys don't overlap).
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Deduplicate while preserving order.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
85
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


# NOTE(review): identifiers in this copy were mangled (duplicate `_A` parameters are
# a SyntaxError; assignment targets were replaced while `batch_frames`, `residual`,
# `output`, ... are still read). Names reconstructed from the read sites — confirm
# against the upstream diffusers `transformer_temporal` module.
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of `TransformerTemporalModel`: the transformed hidden states."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer block stack that attends over the temporal (frame) axis of a video latent."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames: int = 1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Apply temporal attention to `hidden_states`.

        `hidden_states` is expected to be `(batch * num_frames, channel, height, width)`
        (established by the reshape below); the frames axis is made the sequence axis
        so each spatial location attends across frames. Returns a
        `TransformerTemporalModelOutput` (or a 1-tuple when `return_dict=False`).
        """
        # 1. Input: fold (batch*frames, C, H, W) -> (batch*H*W, frames, C)
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: unfold back to (batch*frames, C, H, W) and add the residual.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
74
0
# This file is autogenerated-style "dummy objects": placeholders that raise a helpful
# error when torch/transformers/onnx are not installed.
# NOTE(review): in this copy all six classes were mangled to the same name `_a`, the
# metaclass to the undefined `snake_case_`, and the `_backends` attribute (read by the
# DummyObject machinery) was renamed. Class names below are reconstructed from the
# upstream diffusers `dummy_torch_and_transformers_and_onnx_objects.py` — confirm.
from ..utils import DummyObject, requires_backends


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
86
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # A line belongs to the current block if it is indented, (nearly) empty, or a
    # closing paren of a multi-line signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` (dotted path under src/diffusers)."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        # black only formats top-level code, so wrap indented snippets in a dummy class.
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """Check that code commented as a copy in `filename` matches the original.

    Returns the list of `[object_name, line]` differences; when `overwrite` is True,
    rewrites the file in place instead.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite=False):
    """Run `is_copy_consistent` on every Python file under src/diffusers."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
74
0
# Lazy-import __init__ for the BARTpho tokenizer: the real submodule is only
# imported on first attribute access (or eagerly by static type checkers).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional: simply don't expose the tokenizer.
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


# NOTE(review): the original method names were all mangled to the same identifier
# (so `tearDown` and one test were silently lost) and `jnp.bfloataa` does not exist
# (restored to `jnp.bfloat16`). Names reconstructed from the method bodies.
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # super.tearDown + free memory between tests (large pipelines).
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """SD2 in bf16 produces the expected image slice with the default scheduler."""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """SD2 in bf16 with the DPM-Solver multistep scheduler matches the reference slice."""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
74
0
"""simple docstring""" class lowercase__ : # Public class to implement a graph def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None: _lowerCamelCase : str = row _lowerCamelCase : Dict = col _lowerCamelCase : List[str] = graph def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None: # Checking all 8 elements surrounding nth element _lowerCamelCase : Optional[int] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order _lowerCamelCase : Union[str, Any] = [-1, 0, 1, -1, 1, -1, 0, 1] _lowerCamelCase : Union[str, Any] = True # Make those cells visited for k in range(8): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , SCREAMING_SNAKE_CASE): self.diffs(i + row_nbr[k] , j + col_nbr[k] , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> int: # And finally, count all islands. _lowerCamelCase : Tuple = [[False for j in range(self.COL)] for i in range(self.ROW)] _lowerCamelCase : int = 0 for i in range(self.ROW): for j in range(self.COL): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) count += 1 return count
88
# Lazy-import __init__ for LayoutLMv2. Optional-dependency groups (tokenizers,
# vision, torch) are probed and only the available submodules are exposed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
74
0
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402

# Local fixture: a dummy config.json whose model_type is "roberta".
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    """Tests for AutoConfig resolution: shortcuts, local files, the registry, and remote code."""

    def setUp(self):
        # Fail fast instead of waiting for interactive confirmation when the
        # dynamic-config tests below load remote code.
        # NOTE(review): reconstructed target — the original assignment's left-hand
        # side was lost; confirm against the canonical test file.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        """The auto module must be importable and expose a proper module spec."""
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        """When model_type is absent, the folder-name pattern match decides the class."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=False
            )

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        """A locally registered config with the same model_type wins unless remote code is requested."""

        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=False
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=True
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
89
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for MobileBERT (slow and fast implementations)."""

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        # Tiny vocabulary sufficient for the wordpiece tests below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Default strip_accents=None behaves like True when lowercasing.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 101/102 are the [CLS]/[SEP] ids for this vocabulary.
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
74
0
"""Image processor: shortest-edge resize, center-crop, rescale, ImageNet-standard normalize."""
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


# NOTE(review): the original class declaration was scrambled (it inherited from
# itself and every parameter shared one name, a SyntaxError). The name below is
# inferred from the defaults (shortest_edge=256, crop 224, ImageNet-standard
# normalization, semantic-segmentation post-processing) — confirm against the
# original module before relying on it.
class MobileNetV2ImageProcessor(BaseImageProcessor):
    """Resizes (shortest edge), center-crops, rescales and normalizes images.

    Also post-processes semantic-segmentation logits back to per-pixel label maps.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize channel-wise with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a batch; per-call kwargs override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Turn model logits into per-image label maps, optionally resized to ``target_sizes``."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
90
"""Deprecated alias kept for backward compatibility with pre-v5 checkpoints and code."""
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated: thin subclass of `MobileViTImageProcessor` that only emits a deprecation warning."""

    def __init__(self, *args, **kwargs) -> None:
        # FutureWarning so downstream code migrates before the class is removed in v5.
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
74
0
"""simple docstring""" def _snake_case ( snake_case__ : list ): if not grid or not grid[0]: raise TypeError('The grid does not contain the appropriate information' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] A = grid[0] for row_n in range(1 , len(snake_case__ ) ): A = grid[row_n] A = fill_row(snake_case__ , snake_case__ ) A = grid[row_n] return grid[-1][-1] def _snake_case ( snake_case__ : list , snake_case__ : list ): current_row[0] += row_above[0] for cell_n in range(1 , len(snake_case__ ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
91
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


lowercase_ = datasets.utils.logging.get_logger(__name__)


# NOTE(review): an automated rename broke this module — the config dataclass
# below is referenced later as ``ParquetConfig`` (undefined here), both
# classes share the name ``__UpperCamelCase`` (the builder clobbers the
# config), all four builder methods share the name ``UpperCAmelCase__`` (only
# the last survives), and method bodies still read pre-rename locals
# (``dl_manager``, ``data_files``, ``files``, ``splits``, ``schema``,
# ``pa_table``, ``parquet_file``, ``logger``).  Restore the original names
# before running; the comments document the intended behavior.
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    """simple docstring"""

    # Intended fields (all three assignments share one obfuscated name, so
    # only the last survives): presumably batch_size=10000, columns=None,
    # features=None — confirm against the original datasets ParquetConfig.
    lowerCAmelCase_ = 1_00_00
    lowerCAmelCase_ = None
    lowerCAmelCase_ = None


class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """simple docstring"""

    # Builder config class (NameError as written — see module note).
    lowerCAmelCase_ = ParquetConfig

    def UpperCAmelCase__ ( self : Any ):
        """simple docstring"""
        # _info: dataset metadata built from the (possibly None) features.
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ):
        """simple docstring"""
        # _split_generators: download/extract data files and emit one
        # SplitGenerator per split; infers features from the first parquet
        # file's Arrow schema when none are configured.
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(_A , (str, list, tuple) ):
            __SCREAMING_SNAKE_CASE : Tuple = data_files
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        __SCREAMING_SNAKE_CASE : int = []
        for split_name, files in data_files.items():
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE : Any = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(_A ):
                    with open(_A , '''rb''' ) as f:
                        __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
                    break
            splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
        return splits

    def UpperCAmelCase__ ( self : str , _A : pa.Table ):
        """simple docstring"""
        # _cast_table: align an Arrow table with the declared features.
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema )
        return pa_table

    def UpperCAmelCase__ ( self : Tuple , _A : str ):
        """simple docstring"""
        # _generate_tables: stream record batches out of each parquet file,
        # keyed by "<file_idx>_<batch_idx>"; validates the configured column
        # subset against the declared features first.
        __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
            with open(_A , '''rb''' ) as f:
                __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A )
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' )
                    raise
74
0
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

# Shared RNG so generated fixtures are reproducible within one run.
UpperCamelCase_ = random.Random()


# NOTE(review): an automated rename broke this module — the helper below
# declares four parameters all named ``__magic_name__`` (a SyntaxError) while
# its body still reads the original names (rng, shape, scale, global_rng);
# several methods within each class share one obfuscated name and shadow each
# other; the mixin base was renamed to the undefined ``lowercase__``
# (presumably SequenceFeatureExtractionTestMixin).  Restore the original
# identifiers before running; comments document the intended behavior.
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any]=1.0 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Tuple=None ) -> Union[str, Any]:
    # floats_list: build a shape[0] x shape[1] nested list of random floats.
    if rng is None:
        lowercase : List[Any] = global_rng
    lowercase : List[str] = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values


class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    # Tester holding the hyper-parameters used to build TvltFeatureExtractor
    # fixtures for the test class below.
    def __init__( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[Any]=400 , UpperCAmelCase__ : str=2000 , UpperCAmelCase__ : Dict=2048 , UpperCAmelCase__ : Union[str, Any]=128 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : int=30 , UpperCAmelCase__ : Dict=44100 , ):
        '''simple docstring'''
        lowercase : List[str] = parent
        lowercase : Dict = batch_size
        lowercase : List[Any] = min_seq_length
        lowercase : List[Any] = max_seq_length
        # Step between consecutive fixture lengths when lengths vary.
        lowercase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        lowercase : Dict = spectrogram_length
        lowercase : str = feature_size
        lowercase : List[Any] = num_audio_channels
        lowercase : Optional[int] = hop_length
        lowercase : Optional[int] = chunk_length
        lowercase : Tuple = sampling_rate

    def lowerCamelCase_ ( self : str ):
        '''simple docstring'''
        # prepare_feat_extract_dict: ctor kwargs for the feature extractor.
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ):
        '''simple docstring'''
        # prepare_inputs_for_common: equal-length or growing-length float
        # inputs, optionally converted to numpy arrays.

        def _flatten(UpperCAmelCase__ : Optional[Any] ):
            return list(itertools.chain(*UpperCAmelCase__ ) )

        if equal_length:
            lowercase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            lowercase : Any = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            lowercase : List[Any] = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
    # Test suite for TvltFeatureExtractor.
    lowerCamelCase_ = TvltFeatureExtractor

    def lowerCamelCase_ ( self : Optional[Any] ):
        '''simple docstring'''
        # setUp: fresh tester per test.
        lowercase : List[Any] = TvltFeatureExtractionTester(self )

    def lowerCamelCase_ ( self : Tuple ):
        '''simple docstring'''
        # Checks the expected config attributes exist on a fresh extractor.
        lowercase : str = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''spectrogram_length''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''feature_size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''num_audio_channels''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''hop_length''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''chunk_length''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''sampling_rate''' ) )

    def lowerCamelCase_ ( self : str ):
        '''simple docstring'''
        # save_pretrained / from_pretrained round-trip preserves the config
        # (mel filter banks compared with allclose, the rest with equality).
        lowercase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Optional[Any] = feat_extract_first.save_pretrained(UpperCAmelCase__ )[0]
            check_json_file_has_correct_format(UpperCAmelCase__ )
            lowercase : Optional[Any] = self.feature_extraction_class.from_pretrained(UpperCAmelCase__ )

        lowercase : Any = feat_extract_first.to_dict()
        lowercase : Union[str, Any] = feat_extract_second.to_dict()
        lowercase : Tuple = dict_first.pop('''mel_filters''' )
        lowercase : str = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : Optional[Any] ):
        '''simple docstring'''
        # to_json_file / from_json_file round-trip preserves the config.
        lowercase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : str = os.path.join(UpperCAmelCase__ , '''feat_extract.json''' )
            feat_extract_first.to_json_file(UpperCAmelCase__ )
            lowercase : Union[str, Any] = self.feature_extraction_class.from_json_file(UpperCAmelCase__ )

        lowercase : List[Any] = feat_extract_first.to_dict()
        lowercase : int = feat_extract_second.to_dict()
        lowercase : Optional[int] = dict_first.pop('''mel_filters''' )
        lowercase : Tuple = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        # Initialize feature_extractor
        lowercase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        lowercase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase : Any = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]

        # Test not batched input
        lowercase : Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test batched
        lowercase : str = feature_extractor(UpperCAmelCase__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test audio masking
        lowercase : Union[str, Any] = feature_extractor(
            UpperCAmelCase__ , return_tensors='''np''' , sampling_rate=44100 , mask_audio=UpperCAmelCase__ ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test 2-D numpy arrays are batched.
        lowercase : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        lowercase : Dict = np.asarray(UpperCAmelCase__ )
        lowercase : Union[str, Any] = feature_extractor(UpperCAmelCase__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

    def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
        '''simple docstring'''
        # _load_datasamples: first num_samples raw waveforms of the dummy
        # LibriSpeech validation split (sorted by id for determinism).
        lowercase : Dict = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        lowercase : Any = ds.sort('''id''' ).select(range(UpperCAmelCase__ ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    def lowerCamelCase_ ( self : str ):
        '''simple docstring'''
        # Integration test: pins the output shape and the first spectrogram
        # values for one real audio sample.
        lowercase : str = self._load_datasamples(1 )
        lowercase : List[str] = TvltFeatureExtractor()
        lowercase : Any = feature_extractor(UpperCAmelCase__ , return_tensors='''pt''' ).audio_values

        self.assertEquals(audio_values.shape , (1, 1, 192, 128) )

        lowercase : List[Any] = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCAmelCase__ , atol=1E-4 ) )
92
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Given a point on the ellipse 4x^2 + y^2 = 100 and the slope of the
    incoming ray, return (next_x, next_y, outgoing_gradient) of the next
    reflection.

    Fix: both functions in this module had been collapsed to the name ``a__``
    with duplicate parameter names (a SyntaxError) while the bodies and the
    ``__main__`` guard still referenced ``next_point``/``solution``; the
    original names are restored here.
    """
    # Slope of the normal to the ellipse at (point_x, point_y).
    normal_gradient = point_y / 4 / point_x
    # sin/cos of twice the normal angle, used to reflect the incoming slope.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point — take the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Project Euler 144: count internal reflections of the laser beam before
    it escapes through the gap at the top (-0.01 <= x <= 0.01, y > 0)."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # Slope of the initial beam, entering at (0.0, 10.1).
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


# Backwards-compatible alias for the collapsed public name.
a__ = solution

if __name__ == "__main__":
    print(f'''{solution() = }''')
74
0
"""simple docstring""" from PIL import Image def __A (_SCREAMING_SNAKE_CASE ) ->Image: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ :Dict = image.size lowerCAmelCase__ :Dict = 0 lowerCAmelCase__ :Tuple = image.load() for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[int] = pixels[j, i] mean += pixel mean //= width * height for j in range(_SCREAMING_SNAKE_CASE ): for i in range(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Dict = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __A = mean_threshold(Image.open("""path_to_image""").convert("""L""")) image.save("""output_image_path""")
93
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


# NOTE(review): an automated rename broke this module — the tester's
# ``__init__`` declares every parameter as ``_A`` (a SyntaxError) while the
# body still reads the original names (size, crop_size, parent, ...); the
# mixin base of the test class was renamed to the undefined
# ``lowerCAmelCase__`` (presumably ImageProcessingSavingTestMixin); and every
# test method shares the name ``UpperCAmelCase__`` so later definitions
# shadow earlier ones.  Restore the original identifiers before running.
class __UpperCamelCase ( unittest.TestCase ):
    """simple docstring"""

    # Tester holding the hyper-parameters used to build image fixtures.
    def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20}
        __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        __SCREAMING_SNAKE_CASE : int = parent
        __SCREAMING_SNAKE_CASE : Optional[int] = batch_size
        __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
        __SCREAMING_SNAKE_CASE : List[str] = image_size
        __SCREAMING_SNAKE_CASE : int = min_resolution
        __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution
        __SCREAMING_SNAKE_CASE : List[Any] = do_resize
        __SCREAMING_SNAKE_CASE : Union[str, Any] = size
        __SCREAMING_SNAKE_CASE : str = do_center_crop
        __SCREAMING_SNAKE_CASE : Any = crop_size

    def UpperCAmelCase__ ( self : Dict ):
        """simple docstring"""
        # prepare_image_processor_dict: ctor kwargs for the image processor.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
    """simple docstring"""

    lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """simple docstring"""
        # setUp: fresh tester per test.
        __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self )

    @property
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """simple docstring"""
        # Checks the expected config attributes exist.
        __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_A , '''do_resize''' ) )
        self.assertTrue(hasattr(_A , '''size''' ) )
        self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
        self.assertTrue(hasattr(_A , '''crop_size''' ) )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """simple docstring"""
        # from_dict honours size / crop_size overrides.
        __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )

        __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def UpperCAmelCase__ ( self : int ):
        """simple docstring"""
        pass

    def UpperCAmelCase__ ( self : Dict ):
        """simple docstring"""
        # PIL inputs: single image and batch both yield NCHW crops.
        __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image )

        # Test not batched input
        __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """simple docstring"""
        # numpy inputs: same shape checks as the PIL case.
        __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , np.ndarray )

        # Test not batched input
        __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """simple docstring"""
        # torch inputs: same shape checks as the PIL case.
        __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor )

        # Test not batched input
        __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
74
0
'''simple docstring''' from __future__ import annotations def lowercase_ ( __A : tuple[int, int] , __A : int ) -> list[tuple[int, int]]: """simple docstring""" lowercase , lowercase : Tuple =position lowercase : Optional[Any] =[ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] lowercase : int =[] for position in positions: lowercase , lowercase : List[str] =position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(__A ) return permissible_positions def lowercase_ ( __A : list[list[int]] ) -> bool: """simple docstring""" return not any(elem == 0 for row in board for elem in row ) def lowercase_ ( __A : list[list[int]] , __A : tuple[int, int] , __A : int ) -> bool: """simple docstring""" if is_complete(__A ): return True for position in get_valid_pos(__A , len(__A ) ): lowercase , lowercase : Union[str, Any] =position if board[y][x] == 0: lowercase : Optional[Any] =curr + 1 if open_knight_tour_helper(__A , __A , curr + 1 ): return True lowercase : Optional[int] =0 return False def lowercase_ ( __A : int ) -> list[list[int]]: """simple docstring""" lowercase : Dict =[[0 for i in range(__A )] for j in range(__A )] for i in range(__A ): for j in range(__A ): lowercase : Dict =1 if open_knight_tour_helper(__A , (i, j) , 1 ): return board lowercase : int =0 lowercase : List[Any] =F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(__A ) if __name__ == "__main__": import doctest doctest.testmod()
94
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array of *input_str*.

    z[i] is the length of the longest common prefix of the string and its
    suffix starting at i (z[0] is left at 0 by convention).  Runs in O(n)
    via the standard z-box (left/right pointer) optimisation.

    Fix: all three functions in this module had been collapsed to the name
    ``a__`` with duplicate parameter names (a SyntaxError) while the bodies
    still called ``go_next`` and ``z_function``; the original names are
    restored, with ``a__`` kept as an alias below.
    """
    z_result = [0 for _ in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval: reuse prior work
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Whether the naive prefix comparison at position *i* can be extended."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of *pattern* in *input_str* using the z-function of
    their concatenation: any z-value >= len(pattern) marks a match start."""
    answer = 0
    z_result = z_function(pattern + input_str)

    for val in z_result:
        if val >= len(pattern):
            answer += 1
    return answer


# Backwards-compatible alias for the collapsed public name.
a__ = find_pattern

if __name__ == "__main__":
    import doctest

    doctest.testmod()
74
0
"""simple docstring""" import numpy # List of input, output pairs lowerCamelCase_ = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) lowerCamelCase_ = (((515, 22, 13), 555), ((61, 35, 49), 150)) lowerCamelCase_ = [2, 4, 1, 5] lowerCamelCase_ = len(train_data) lowerCamelCase_ = 0.009 def snake_case ( A__ ,A__="train" ): return calculate_hypothesis_value(A__ ,A__ ) - output( A__ ,A__ ) def snake_case ( A__ ): UpperCAmelCase_ : Tuple = 0 for i in range(len(A__ ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def snake_case ( A__ ,A__ ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def snake_case ( A__ ,A__ ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def snake_case ( A__ ,A__=m ): UpperCAmelCase_ : Optional[int] = 0 for i in range(A__ ): if index == -1: summation_value += _error(A__ ) else: summation_value += _error(A__ ) * train_data[i][0][index] return summation_value def snake_case ( A__ ): UpperCAmelCase_ : Dict = summation_of_cost_derivative(A__ ,A__ ) / m return cost_derivative_value def snake_case ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output UpperCAmelCase_ : Dict = 0.000002 UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : Tuple = 0 while True: j += 1 UpperCAmelCase_ : Tuple = [0, 0, 0, 0] for i in range(0 ,len(A__ ) ): UpperCAmelCase_ : Dict = get_cost_derivative(i - 1 ) UpperCAmelCase_ : Optional[Any] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( A__ ,A__ ,atol=A__ ,rtol=A__ ,): break UpperCAmelCase_ : str = temp_parameter_vector print(("Number of iterations:", j) ) def snake_case ( ): for i in range(len(A__ ) ): print(("Actual output value:", output(A__ ,"test" )) 
) print(("Hypothesis output:", calculate_hypothesis_value(A__ ,"test" )) ) if __name__ == "__main__": run_gradient_descent() print('''\nTesting gradient descent for a linear hypothesis function.\n''') test_gradient_descent()
95
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Import structure for the lazy module: maps submodule name -> public names.
# NOTE(review): the obfuscated name ``lowercase_`` is re-bound three times
# below (dict, torch list, TF list — later bindings shadow earlier ones),
# while the _LazyModule call at the bottom references ``_import_structure``,
# which is never defined here.  Presumably the dict was originally named
# ``_import_structure`` with the backend lists inserted under
# "modeling_swin" / "modeling_tf_swin"; confirm before relying on this file.
lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}

# PyTorch models are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwinForImageClassification""",
        """SwinForMaskedImageModeling""",
        """SwinModel""",
        """SwinPreTrainedModel""",
        """SwinBackbone""",
    ]

# TensorFlow models are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSwinForImageClassification""",
        """TFSwinForMaskedImageModeling""",
        """TFSwinModel""",
        """TFSwinPreTrainedModel""",
    ]

# Static type checkers see the real imports; at runtime the module is
# replaced by a _LazyModule that defers them to first attribute access.
if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
0
"""simple docstring""" import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class __A ( unittest.TestCase ): def lowerCamelCase__ ( self : str ) -> List[Any]: __magic_name__: Dict = logging.get_logger() # the current default level is logging.WARNING __magic_name__: List[Any] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(__snake_case ) def lowerCamelCase__ ( self : Any ) -> str: __magic_name__: str = logging.get_verbosity() __magic_name__: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __magic_name__: int = """Testing 1, 2, 3""" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(__snake_case ) as cl: logger.warning(__snake_case ) self.assertEqual(cl.out , msg + """\n""" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(__snake_case ) as cl: logger.warning(__snake_case ) self.assertEqual(cl.out , """""" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(__snake_case ) as cl: logger.warning(__snake_case ) self.assertEqual(cl.out , msg + """\n""" ) # restore to the original level 
logging.set_verbosity(__snake_case ) @mockenv(TRANSFORMERS_VERBOSITY="""error""" ) def lowerCamelCase__ ( self : Optional[int] ) -> Any: # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __magic_name__: str = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __magic_name__: List[str] = os.getenv("""TRANSFORMERS_VERBOSITY""" , __snake_case ) __magic_name__: Any = logging.log_levels[env_level_str] __magic_name__: Optional[Any] = logging.get_verbosity() self.assertEqual( __snake_case , __snake_case , F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level __magic_name__: Optional[Any] = """""" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="""super-error""" ) def lowerCamelCase__ ( self : str ) -> Optional[int]: # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __magic_name__: Tuple = logging.logging.getLogger() with CaptureLogger(__snake_case ) as cl: # this action activates the env var logging.get_logger("""transformers.models.bart.tokenization_bart""" ) self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out ) # no need to restore as nothing was changed def lowerCamelCase__ ( self : Tuple ) -> int: # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __magic_name__: Union[str, Any] = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __magic_name__: Any = """Testing 1, 2, 3""" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ): # nothing should be logged as env var disables this method with CaptureLogger(__snake_case ) as cl: logger.warning_advice(__snake_case ) self.assertEqual(cl.out , """""" ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(__snake_case ) as cl: logger.warning_advice(__snake_case ) self.assertEqual(cl.out , msg + """\n""" ) def a ( ) -> Any: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
96
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in 
name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if 
"prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] 
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', 
'''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == 
"xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to 
push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
0
from __future__ import annotations from math import pow, sqrt def a ( snake_case__: float , snake_case__: float , snake_case__: float ): '''simple docstring''' if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance == 0: return {"resistance": sqrt(pow(snake_case__ , 2 ) - pow(snake_case__ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(snake_case__ , 2 ) - pow(snake_case__ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(snake_case__ , 2 ) + pow(snake_case__ , 2 ) )} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
97
from itertools import islice
from pathlib import Path


def a__(src_dir, dest_dir, n):
    """Copy each file from ``src_dir`` into ``dest_dir``, truncated to its first *n* lines.

    Trailing whitespace is stripped from every kept line, and the destination
    file is written without a trailing newline. ``dest_dir`` is created if it
    does not already exist. Each destination path is printed as it is written.

    Args:
        src_dir: directory whose files are minified (non-files are skipped).
        dest_dir: directory receiving the truncated copies (same file names).
        n: number of leading lines to keep from each file.
    """
    # Bug fixes vs. original: the three parameters all shared one name
    # (SyntaxError), file handles were opened without being closed, and the
    # CLI entry point referenced an undefined name.
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        if not path.is_file():
            # Opening a directory would raise IsADirectoryError; skip non-files.
            continue
        with path.open() as src_file:  # `with` avoids leaking the handle
            kept_lines = [line.rstrip() for line in islice(src_file, n)]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        with dest_path.open("w") as dest_file:
            dest_file.write("\n".join(kept_lines))


if __name__ == "__main__":
    # `fire` is only needed for command-line use; importing it lazily keeps
    # the module importable (and testable) without the dependency installed.
    import fire

    fire.Fire(a__)
74
0
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def a__ ( lowercase : Tuple, lowercase : Optional[int], lowercase : str=None ) -> Tuple: """simple docstring""" assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match""" _UpperCamelCase = nn.Parameter(lowercase ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match""" _UpperCamelCase = nn.Parameter(lowercase ) def a__ ( lowercase : Optional[int], lowercase : List[Any], lowercase : Optional[int] ) -> Optional[int]: """simple docstring""" _UpperCamelCase = np.asarray(weights[0] ) _UpperCamelCase = np.asarray(weights[1] ) _UpperCamelCase = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), ) set_param( torch_layer.self_attention.value, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), ) set_param( torch_layer.output.dense, torch.tensor(lowercase ).view(-1, lowercase ).contiguous().transpose(0, 1 ), ) def a__ ( lowercase : int, lowercase : int, lowercase : str ) -> Dict: """simple docstring""" _UpperCamelCase = np.asarray(weights[0] ) _UpperCamelCase = np.asarray(weights[1] ) _UpperCamelCase = np.asarray(weights[2] ) _UpperCamelCase = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), ) set_param( torch_layer.self_attention.key, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), ) set_param( torch_layer.self_attention.value, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), ) set_param( torch_layer.output.dense, torch.tensor(lowercase ).view(-1, lowercase 
).contiguous().transpose(0, 1 ), ) def a__ ( lowercase : List[Any], lowercase : List[str], lowercase : Tuple ) -> Dict: """simple docstring""" _UpperCamelCase = weights[0][0][0] _UpperCamelCase = np.asarray(layer_norm_a[0] ) _UpperCamelCase = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm, torch.tensor(lowercase ), torch.tensor(lowercase ), ) # lsh weights + output _UpperCamelCase = weights[0][1] if len(lowercase ) < 4: set_layer_weights_in_torch_lsh(lowercase, torch_block.attention, lowercase ) else: set_layer_weights_in_torch_local(lowercase, torch_block.attention, lowercase ) # intermediate weighs _UpperCamelCase = weights[2][0][1][2] # Chunked Feed Forward if len(lowercase ) == 4: _UpperCamelCase = intermediate_weights[2] # layernorm 2 _UpperCamelCase = np.asarray(intermediate_weights[0][0] ) _UpperCamelCase = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm, torch.tensor(lowercase ), torch.tensor(lowercase ), ) # intermediate dense _UpperCamelCase = np.asarray(intermediate_weights[1][0] ) _UpperCamelCase = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense, torch.tensor(lowercase ).transpose(0, 1 ).contiguous(), torch.tensor(lowercase ), ) # intermediate out _UpperCamelCase = np.asarray(intermediate_weights[4][0] ) _UpperCamelCase = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense, torch.tensor(lowercase ).transpose(0, 1 ).contiguous(), torch.tensor(lowercase ), ) def a__ ( lowercase : int, lowercase : str, lowercase : List[str] ) -> str: """simple docstring""" _UpperCamelCase = torch_model.reformer # word embeds _UpperCamelCase = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings, torch.tensor(lowercase ), ) if isinstance(weights[3], lowercase ): _UpperCamelCase = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): _UpperCamelCase = 
np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"""{position_embeddings[emb_idx]} emb does not match""" _UpperCamelCase = nn.Parameter(torch.tensor(lowercase ) ) _UpperCamelCase = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowercase ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): _UpperCamelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowercase, lowercase, lowercase ) # output layer norm _UpperCamelCase = np.asarray(weights[7][0] ) _UpperCamelCase = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm, torch.tensor(lowercase ), torch.tensor(lowercase ), ) # output embeddings _UpperCamelCase = np.asarray(weights[9][0] ) _UpperCamelCase = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder, torch.tensor(lowercase ).transpose(0, 1 ).contiguous(), torch.tensor(lowercase ), ) def a__ ( lowercase : List[Any], lowercase : Optional[int], lowercase : Tuple ) -> List[Any]: """simple docstring""" _UpperCamelCase = ReformerConfig.from_json_file(lowercase ) print(F"""Building PyTorch model from configuration: {config}""" ) _UpperCamelCase = ReformerModelWithLMHead(lowercase ) with open(lowercase, '''rb''' ) as f: _UpperCamelCase = pickle.load(lowercase )['''weights'''] set_model_weights_in_torch(lowercase, lowercase, config.hidden_size ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict(), lowercase ) if __name__ == "__main__": lowercase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' 
) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowercase__ : Union[str, Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
98
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class __UpperCamelCase(unittest.TestCase):
    """Unit tests for ``DisjunctiveConstraint``.

    Fixes vs. original: the four test methods all shared one name (so only the
    last was ever collected/run), and the undefined placeholder ``_A`` was used
    where local variables, ``list`` and ``ValueError`` were intended.
    """

    def test_input_types(self):
        """Nested int lists are accepted; tensors are rejected."""
        nested_token_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(nested_token_ids)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        """One sequence being a strict prefix of another is ambiguous."""
        nested_token_ids = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(nested_token_ids)  # fails here

    def test_example_progression(self):
        """Stepping through [1, 2, 3] completes the constraint."""
        nested_token_ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(nested_token_ids)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        """Branching sequences, plus a reset mid-way through."""
        nested_token_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(nested_token_ids)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
74
0
# Lookup table mapping a base-16 remainder (0-15) to its hexadecimal digit.
SCREAMING_SNAKE_CASE = {
    0: '0',
    1: '1',
    2: '2',
    3: '3',
    4: '4',
    5: '5',
    6: '6',
    7: '7',
    8: '8',
    9: '9',
    10: 'a',
    11: 'b',
    12: 'c',
    13: 'd',
    14: 'e',
    15: 'f',
}


def a(lowerCAmelCase__):
    """Convert an integral number to its lowercase hexadecimal string.

    Accepts an int, or a float with no fractional part, and returns a string
    such as ``"0x2a"``; negative inputs yield a leading ``-`` (``"-0x2a"``).

    >>> a(5)
    '0x5'
    >>> a(-5.0)
    '-0x5'
    >>> a(0)
    '0x0'

    Bug fixes vs. original: the digit table was referenced under the wrong
    name, every local shared the name ``__a`` (clobbering one another), and
    an input of 0 produced the bare prefix ``"0x"``.
    """
    # NOTE: `assert` is kept for validation to preserve the original contract
    # (AssertionError on non-integral input), though raising TypeError would
    # be more conventional.
    assert type(lowerCAmelCase__) in (int, float) and lowerCAmelCase__ == int(lowerCAmelCase__)
    decimal = int(lowerCAmelCase__)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    if decimal == 0:
        # Without this, zero would fall through the loop and map to "0x".
        hexadecimal = '0'
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = SCREAMING_SNAKE_CASE[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
99
# Fine-tuning / pretraining script for masked image modeling (SimMIM-style).
# Restores consistent names that the obfuscated original had collapsed
# (three classes shared one name, two functions shared one name, and locals
# were all bound to a single placeholder while references used real names).
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect explicit train/validation folders (if any) into the
        # `data_files` mapping expected by `datasets.load_dataset`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generate a random boolean mask over image patches for one example.

    The mask is sampled at `mask_patch_size` granularity and then upsampled to
    `model_patch_size` granularity, so it can be fed to the model as
    `bool_masked_pos`.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        # Side length of the coarse masking grid and the upsampling factor to
        # reach model-patch resolution.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick `mask_count` coarse patches uniformly at random, then upsample
        # the coarse grid to model-patch resolution and flatten it.
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    """Stack per-example pixel values and masks into model-ready batch tensors."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config: fall back to the configuration's values when not given on
    # the command line, then push the resolved values back into the config.
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for _ in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
74
0
# Tests for the Flax Blenderbot model. Restores consistent names that the
# obfuscated original had collapsed (helper and tester class both named
# `__snake_case`, mixin bases reduced to undefined placeholders, the XLA
# allocator env-var assignment reduced to a dangling string literal).
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor

if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a Blenderbot test input dict."""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    """Builds small Blenderbot configs/inputs and shared cache-consistency checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # Random token ids clipped away from the special ids, with an explicit
        # EOS (2) appended to every sequence.
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        # Decoding one token at a time with a key/value cache must match a
        # single full forward pass (up to numerical tolerance).
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        # Same as above, but with an explicit (padded) decoder attention mask.
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Shape/behavior tests for the LM head and `shift_tokens_right`."""

    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # Shifting consumes exactly one pad token per batch...
        self.assertEqual(n_pad_after, n_pad_before - 1)
        # ...and puts the decoder start token (2) in the first position.
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
100
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """
    Configuration for a Data2Vec vision model.

    Stores the hyper-parameters used to instantiate the model: transformer
    sizes, image/patch geometry, position-bias options, and the semantic
    segmentation (decode/auxiliary head) settings. Defaults mirror the
    ``facebook/data2vec-vision-base`` architecture.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder sizes.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Image / patch geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels

        # Embedding / position-bias options.
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling

        # Decode head attributes (semantic segmentation).
        self.out_indices = out_indices
        self.pool_scales = pool_scales

        # Auxiliary head attributes (semantic segmentation).
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec vision models."""

    # Opset/runtime support for this model starts at torch 1.11.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/size axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX vs. PyTorch outputs.
        return 1e-4
74
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure: maps each submodule to the public names it provides.
# NOTE: this dict MUST be named `_import_structure` — it is passed to
# `_LazyModule` below (the previous version bound it to a throwaway name,
# which raised NameError at import time).
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

# Vision-dependent components are only registered when the vision extras
# (PIL etc.) are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

# Torch-dependent modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports for static type checkers; mirrors _import_structure.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
101
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester(object):
    """Builds tiny configs and inputs shared by all DistilBert model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each example once per choice: (batch, choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntergrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
74
0
"""simple docstring""" from __future__ import annotations __magic_name__ : Any = """Muhammad Umer Farooq""" __magic_name__ : int = """MIT""" __magic_name__ : List[Any] = """1.0.0""" __magic_name__ : Optional[Any] = """Muhammad Umer Farooq""" __magic_name__ : Tuple = """contact@muhammadumerfarooq.me""" __magic_name__ : Tuple = """Alpha""" import re from html.parser import HTMLParser from urllib import parse import requests class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , _A ): '''simple docstring''' super().__init__() UpperCamelCase : list[str] = [] UpperCamelCase : Dict = domain def _a ( self , _A , _A ): '''simple docstring''' if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: UpperCamelCase : Any = parse.urljoin(self.domain , _A ) self.urls.append(_A ) def UpperCamelCase (SCREAMING_SNAKE_CASE ): return ".".join(get_sub_domain_name(SCREAMING_SNAKE_CASE ).split(""".""" )[-2:] ) def UpperCamelCase (SCREAMING_SNAKE_CASE ): return parse.urlparse(SCREAMING_SNAKE_CASE ).netloc def UpperCamelCase (SCREAMING_SNAKE_CASE = "https://github.com" ): UpperCamelCase : Dict = get_domain_name(SCREAMING_SNAKE_CASE ) # Initialize the parser UpperCamelCase : int = Parser(SCREAMING_SNAKE_CASE ) try: # Open URL UpperCamelCase : str = requests.get(SCREAMING_SNAKE_CASE ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through UpperCamelCase : List[str] = set() for link in parser.urls: # open URL. # read = requests.get(link) try: UpperCamelCase : Dict = requests.get(SCREAMING_SNAKE_CASE ) # Get the valid email. UpperCamelCase : List[Any] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text ) # If not in list then append it. 
for email in emails: valid_emails.add(SCREAMING_SNAKE_CASE ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __magic_name__ : str = emails_from_url("""https://github.com""") print(f'''{len(emails)} emails found:''') print("""\n""".join(sorted(emails)))
102
import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Return (and lazily create) the module logger."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when a lock could not be acquired within the timeout."""

    def __init__(self, lock_file):
        # Path of the lock file that could not be acquired.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy(object):
    """Context-manager proxy returned by acquire(); releases the lock on exit."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Abstract base class for platform-specific file locks."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long for the filesystem.
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as returned by os.open().
        # Only not None while this object holds the lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # Guards the lock counter below.
        self._thread_lock = threading.Lock()

        # Nested-locking counter: incremented on acquire, lock is only
        # released again when it drops back to 0.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Try to acquire the lock, polling until *timeout* (negative = forever)."""
        if timeout is None:
            timeout = self.timeout

        # Increment the counter right at the beginning; undone on failure.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Release one level of nesting; *force* releases unconditionally."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        # Force-release on garbage collection so the OS lock is never leaked.
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """Shorten over-long lock filenames by splicing in a hash."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """File lock backed by msvcrt.locking (Windows only)."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # \\?\ prefix lifts the MAX_PATH limitation on Windows.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """File lock backed by fcntl.flock (POSIX)."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # Respect the filesystem's real filename limit.
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Portable lock that simply relies on exclusive file creation."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


# Pick the best available implementation for this platform.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
74
0
"""Fast tokenization classes for RoFormer (Jieba-based Chinese pre-tokenization)."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" RoFormer tokenizer backed by HuggingFace *tokenizers*, using a
    custom Jieba pre-tokenizer for Chinese text.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-build the backend normalizer if the requested lowercasing /
        # accent-stripping options disagree with the serialized tokenizer.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # Custom (Python-side) pre-tokenizers cannot be pickled; swap in a
        # plain BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Restore the Jieba pre-tokenizer dropped in __getstate__.
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # The custom Jieba pre-tokenizer is not serializable; fall back to
        # BertPreTokenizer while writing files to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
103
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends


# Optional dependency: BeautifulSoup (bs4); only imported when available.
if is_bsa_available():
    import bsa
    from bsa import BeautifulSoup


lowercase_ = logging.get_logger(__name__)


class __UpperCamelCase ( lowerCAmelCase__ ):
    """Feature extractor that turns raw HTML into (text nodes, xpath) pairs.

    NOTE(review): machine-obfuscated chunk. All locals are assigned to the same
    name ``__SCREAMING_SNAKE_CASE`` and many call sites pass the parameter
    ``_A`` where a distinct local was clearly intended (e.g.
    ``parent.find_all(child.name, recursive=_A)`` — upstream passes
    ``recursive=False`` — and ``stringaxtag_seq.append(_A)`` after unpacking
    ``self.xpath_soup``). One method also repeats the parameter name ``_A``
    (a SyntaxError). Original names must be restored before use.
    """

    def __init__( self : Optional[Any] , **_A : Dict ):
        """Require the bs4 backend, then defer to the mixin constructor."""
        requires_backends(self , ['''bs4'''] )
        super().__init__(**_A )

    def UpperCAmelCase__ ( self : Optional[int] , _A : Any ):
        """Walk up from an element to the document root, collecting the tag name
        and 1-based sibling index at each level (reversed at the end so the
        result reads root->leaf)."""
        __SCREAMING_SNAKE_CASE : int = []
        __SCREAMING_SNAKE_CASE : Any = []
        # NavigableStrings have no .name; use their parent tag instead.
        __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            # NOTE(review): ``recursive=_A`` passes the method argument as the
            # bs4 ``recursive`` flag; upstream uses ``recursive=False`` so only
            # direct-child siblings are counted — TODO confirm and fix.
            __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A )
            xpath_tags.append(child.name )
            # Subscript 0 means "only child of this tag name"; otherwise the
            # 1-based position of `child` among same-named siblings.
            xpath_subscripts.append(
                0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) )
            __SCREAMING_SNAKE_CASE : Any = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ):
        """Parse an HTML string and return three aligned lists: the text nodes,
        their xpath tag sequences, and their xpath subscript sequences."""
        __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' )
        __SCREAMING_SNAKE_CASE : str = []
        __SCREAMING_SNAKE_CASE : Optional[Any] = []
        __SCREAMING_SNAKE_CASE : int = []
        for element in html_code.descendants:
            if type(_A ) == bsa.element.NavigableString:
                if type(element.parent ) != bsa.element.Tag:
                    continue
                __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip()
                if not text_in_this_tag:
                    continue  # skip whitespace-only nodes
                all_doc_strings.append(_A )
                __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A )
                stringaxtag_seq.append(_A )
                stringaxsubs_seq.append(_A )
        # Sanity checks: the three lists must stay aligned.
        if len(_A ) != len(_A ):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(_A ) != len(_A ):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ):
        """Render a tag/subscript pair as an xpath string like ``/html/body/div[2]``.

        NOTE(review): both parameters are named ``_A`` — SyntaxError as written.
        """
        __SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
        for tagname, subs in zip(_A , _A ):
            xpath += F'''/{tagname}'''
            if subs != 0:
                xpath += F'''[{subs}]'''
        return xpath

    def __call__( self : Optional[int] , _A : Tuple ):
        """Main entry: accept one HTML string or a batch of them, and return a
        BatchFeature with keys ``nodes`` and ``xpaths``."""
        __SCREAMING_SNAKE_CASE : List[Any] = False
        # Check that strings has a valid type (str, or list/tuple of str).
        if isinstance(_A , _A ):
            __SCREAMING_SNAKE_CASE : Any = True
        elif isinstance(_A , (list, tuple) ):
            if len(_A ) == 0 or isinstance(html_strings[0] , _A ):
                __SCREAMING_SNAKE_CASE : List[Any] = True
        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                F'''but is of type {type(_A )}.''' )
        __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) )
        if not is_batched:
            __SCREAMING_SNAKE_CASE : Dict = [html_strings]  # wrap a single example
        # Get nodes + xpaths for every example in the batch.
        __SCREAMING_SNAKE_CASE : str = []
        __SCREAMING_SNAKE_CASE : Tuple = []
        for html_string in html_strings:
            __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A )
            nodes.append(_A )
            __SCREAMING_SNAKE_CASE : Dict = []
            for node, tag_list, sub_list in zip(_A , _A , _A ):
                __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A )
                xpath_strings.append(_A )
            xpaths.append(_A )
        # Package as a BatchFeature dict.
        __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths}
        __SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A )
        return encoded_inputs
74
0
"""Build and index a custom knowledge dataset for RAG.

Pipeline: load a tab-separated (title, text) CSV, split texts into ~100-word
passages, embed them with a DPR context encoder, save the dataset, then build
a FAISS HNSW index over the embeddings.

NOTE(review): machine-obfuscated chunk. The three helper functions are all
named ``_lowerCamelCase`` (later defs shadow earlier ones), the three argument
dataclasses are all named ``UpperCamelCase__``, signatures repeat parameter
names (a SyntaxError), and bodies reference the original, now-unbound names
(``text``, ``documents``, ``ctx_tokenizer``, ``rag_example_args``,
``parser``, ``main``, ``_lowerCAmelCase`` — presumably ``__file__``).
The original identifiers must be restored before this script can run.
"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)  # inference only — no gradients needed
UpperCamelCase = """cuda""" if torch.cuda.is_available() else """cpu"""


def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : Tuple=100, UpperCAmelCase_ : str=" " ) -> List[str]:
    """Split a text into chunks of n words joined by `character`."""
    A__ = text.split(UpperCAmelCase_ )
    return [character.join(text[i : i + n] ).strip() for i in range(0, len(UpperCAmelCase_ ), UpperCAmelCase_ )]


def _lowerCamelCase ( UpperCAmelCase_ : dict ) -> dict:
    """Explode each (title, text) document into one row per passage."""
    A__ , A__ = [], []
    for title, text in zip(documents["title"], documents["text"] ):
        if text is not None:
            for passage in split_text(UpperCAmelCase_ ):
                titles.append(title if title is not None else "" )
                texts.append(UpperCAmelCase_ )
    return {"title": titles, "text": texts}


def _lowerCamelCase ( UpperCAmelCase_ : dict, UpperCAmelCase_ : DPRContextEncoder, UpperCAmelCase_ : DPRContextEncoderTokenizerFast ) -> dict:
    """Compute DPR pooled embeddings for a batch of (title, text) passages."""
    A__ = ctx_tokenizer(
        documents["title"], documents["text"], truncation=UpperCAmelCase_, padding="longest", return_tensors="pt" )["input_ids"]
    A__ = ctx_encoder(input_ids.to(device=UpperCAmelCase_ ), return_dict=UpperCAmelCase_ ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def _lowerCamelCase ( UpperCAmelCase_ : "RagExampleArguments", UpperCAmelCase_ : "ProcessingArguments", UpperCAmelCase_ : "IndexHnswArguments", ) -> int:
    """End-to-end driver: build the passage dataset, embed it, save it, and
    index the embeddings with FAISS HNSW."""
    logger.info("Step 1 - Create the dataset" )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    ######################################
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"

    # Load the tab-separated csv with columns "title" and "text".
    A__ = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] )

    # Split the documents into passages of 100 words.
    A__ = dataset.map(UpperCAmelCase_, batched=UpperCAmelCase_, num_proc=processing_args.num_proc )

    # Compute the embeddings with a DPR context encoder.
    A__ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCAmelCase_ )
    A__ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    # float32 (not float64) embeddings to save space.
    A__ = Features(
        {"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} )
    A__ = dataset.map(
        partial(UpperCAmelCase_, ctx_encoder=UpperCAmelCase_, ctx_tokenizer=UpperCAmelCase_ ),
        batched=UpperCAmelCase_,
        batch_size=processing_args.batch_size,
        features=UpperCAmelCase_, )

    # Save the dataset (reload later with datasets.load_from_disk).
    A__ = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset" )
    dataset.save_to_disk(UpperCAmelCase_ )

    logger.info("Step 2 - Index the dataset" )
    # FAISS HNSW: fast approximate nearest-neighbor search over embeddings.
    A__ = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("embeddings", custom_index=UpperCAmelCase_ )

    # Save the index (reload later with dataset.load_faiss_index).
    A__ = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss" )
    dataset.get_index("embeddings" ).save(UpperCAmelCase_ )


@dataclass
class UpperCamelCase__ :
    """CLI arguments describing the input csv, the models, and the output dir."""

    A__ : str = field(
        default=str(Path(_lowerCAmelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,
    )
    A__ : Optional[str] = field(
        default=_lowerCAmelCase ,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,
    )
    A__ : str = field(
        default="facebook/rag-sequence-nq" ,
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,
    )
    A__ : str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" ,
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } ,
    )
    A__ : Optional[str] = field(
        default=str(Path(_lowerCAmelCase ).parent / "test_run" / "dummy-kb" ) ,
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,
    )


@dataclass
class UpperCamelCase__ :
    """CLI arguments controlling dataset.map parallelism and batching."""

    A__ : Optional[int] = field(
        default=_lowerCAmelCase ,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } ,
    )
    A__ : int = field(
        default=1_6 ,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } ,
    )


@dataclass
class UpperCamelCase__ :
    """CLI arguments for the FAISS HNSW index (embedding dim d, links m)."""

    A__ : int = field(
        default=7_6_8 ,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,
    )
    A__ : int = field(
        default=1_2_8 ,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } ,
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        UpperCamelCase = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
104
"""Convert timm LeViT checkpoints to the Hugging Face format.

NOTE(review): machine-obfuscated chunk. Both functions are named ``a__``
(the second shadows the first), signatures repeat the parameter name
``snake_case`` (a SyntaxError in Python), all locals collapse to
``__SCREAMING_SNAKE_CASE``, and bodies reference the original, now-unbound
names (``name``, ``hidden_sizes``, ``from_model``, ``our_model``,
``ImageNetPreTrainedConfig``, ``convert_weight_and_push``,
``convert_weights_and_push``). Original identifiers must be restored.
"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
lowercase_ = logging.get_logger()


def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ):
    """Convert one timm LeViT checkpoint: load timm weights, copy them into the
    HF model key-by-key (by position), sanity-check logits, optionally save."""
    print(F'''Converting {name}...''' )

    with torch.no_grad():
        # Pick the timm source model from the hidden size / name suffix.
        if hidden_sizes == 128:
            if name[-1] == "S":
                __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case )
            else:
                __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case )
        if hidden_sizes == 192:
            __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case )
        if hidden_sizes == 256:
            __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case )
        if hidden_sizes == 384:
            __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case )

        from_model.eval()
        __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval()
        __SCREAMING_SNAKE_CASE : int = OrderedDict()
        __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict()
        __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() )
        __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() )
        print(len(snake_case ) , len(snake_case ) )
        # Copy weights by key position (timm order -> HF order).
        for i in range(len(snake_case ) ):
            __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]]
        our_model.load_state_dict(snake_case )

        # Verify both models produce the same logits on a random input.
        __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) )
        __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case )
        __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits
    assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one."

    __SCREAMING_SNAKE_CASE : Union[str, Any] = name
    print(snake_case )

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'''Pushed {checkpoint_name}''' )


def a__ ( snake_case , snake_case = None , snake_case = True ):
    """Build ImageNet-1k id/label maps and LeViT configs, then convert either
    the requested model or all known LeViT variants."""
    __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json'''
    __SCREAMING_SNAKE_CASE : int = 1_000
    __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels)
    __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
    __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
    # Download and normalize the id->label map.
    __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()}
    __SCREAMING_SNAKE_CASE : str = idalabel
    __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}

    # Config constructor pre-bound with the label maps.
    __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case )

    __SCREAMING_SNAKE_CASE : Union[str, Any] = {
        '''levit-128S''': 128,
        '''levit-128''': 128,
        '''levit-192''': 192,
        '''levit-256''': 256,
        '''levit-384''': 384,
    }
    # Per-variant architecture hyperparameters.
    __SCREAMING_SNAKE_CASE : Optional[int] = {
        '''levit-128S''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-128''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-192''': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-256''': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-384''': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case )
    return config, expected_shape


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""levit-dump-folder/""",
        type=Path,
        required=False,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )

    lowercase_ = parser.parse_args()
    lowercase_ = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
74
0
import math def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float ) -> float: """simple docstring""" if initial_intensity < 0: raise ValueError('The value of intensity cannot be negative' ) # handling of negative values of initial intensity if angle < 0 or angle > 3_60: raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(lowerCamelCase_ ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name='''malus_law''')
105
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Map of submodule name -> public names it provides, consumed by _LazyModule.
# FIX(review): previously this dict was bound to a throwaway name while the
# _LazyModule call below referenced the undefined `_import_structure` (a
# NameError at import time), and the torch-only symbol list *overwrote* that
# name instead of being added to the structure. The canonical lazy-init
# pattern is restored here.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module is used.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are imported
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
0
"""Longformer model configuration and ONNX export configuration.

NOTE(review): machine-obfuscated chunk. ``__init__`` repeats the parameter
name ``__UpperCamelCase`` many times (a SyntaxError), all attribute
assignments collapse to the single name ``A``, and the bodies reference the
original parameter names (``attention_window``, ``sep_token_id``, ...) that
are no longer bound. The original identifiers must be restored.
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    # Imported only for type annotations to avoid runtime import cycles.
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


__snake_case :List[Any] =logging.get_logger(__name__)

# Public checkpoint name -> config URL map.
__snake_case :Tuple ={
    'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
    'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
    'allenai/longformer-large-4096-finetuned-triviaqa': (
        'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
    ),
    'allenai/longformer-base-4096-extra.pos.embd.only': (
        'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
    ),
    'allenai/longformer-large-4096-extra.pos.embd.only': (
        'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
    ),
}


class lowerCAmelCase__ ( _lowerCamelCase ):
    """Configuration class for Longformer models (model_type 'longformer')."""

    A_ : Optional[int] = 'longformer'

    def __init__( self : int , __UpperCamelCase : Union[List[int], int] = 512 , __UpperCamelCase : int = 2 , __UpperCamelCase : int = 1 , __UpperCamelCase : int = 0 , __UpperCamelCase : int = 2 , __UpperCamelCase : int = 30_522 , __UpperCamelCase : int = 768 , __UpperCamelCase : int = 12 , __UpperCamelCase : int = 12 , __UpperCamelCase : int = 3_072 , __UpperCamelCase : str = "gelu" , __UpperCamelCase : float = 0.1 , __UpperCamelCase : float = 0.1 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 2 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : float = 1e-12 , __UpperCamelCase : bool = False , **__UpperCamelCase : int , ) -> Optional[int]:
        """Store all Longformer hyperparameters on the config object.

        NOTE(review): every ``A = ...`` below was originally
        ``self.<param> = <param>``; the right-hand names are the original
        parameter names.
        """
        super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )

        A = attention_window
        A = sep_token_id
        A = bos_token_id
        A = eos_token_id
        A = vocab_size
        A = hidden_size
        A = num_hidden_layers
        A = num_attention_heads
        A = hidden_act
        A = intermediate_size
        A = hidden_dropout_prob
        A = attention_probs_dropout_prob
        A = max_position_embeddings
        A = type_vocab_size
        A = initializer_range
        A = layer_norm_eps
        A = onnx_export


class lowerCAmelCase__ ( _lowerCamelCase ):
    """ONNX export configuration for Longformer.

    NOTE(review): all four methods below are named ``__UpperCamelCase`` —
    later defs shadow earlier ones; originally these were ``inputs``,
    ``outputs``, ``atol_for_validation``, ``default_onnx_opset`` and
    ``generate_dummy_inputs``.
    """

    def __init__( self : Union[str, Any] , __UpperCamelCase : "PretrainedConfig" , __UpperCamelCase : str = "default" , __UpperCamelCase : "List[PatchingSpec]" = None ) -> Any:
        super().__init__(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        A = True  # flag the config so the model knows it is being exported

    @property
    def __UpperCamelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis input spec (adds global_attention_mask)."""
        if self.task == "multiple-choice":
            A = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            A = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ] )

    @property
    def __UpperCamelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        A = super().outputs
        if self.task == "default":
            A = {0: 'batch'}
        return outputs

    @property
    def __UpperCamelCase ( self : Optional[Any] ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def __UpperCamelCase ( self : List[Any] ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )

    def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : "PreTrainedTokenizerBase" , __UpperCamelCase : int = -1 , __UpperCamelCase : int = -1 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Generate dummy inputs, adding a global_attention_mask tensor."""
        A = super().generate_dummy_inputs(
            preprocessor=__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        A = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        A = 1
        return inputs
106
"""Collect selected Python warnings from CI artifacts (zip files or a
directory of ``warnings.txt`` files) and dump them to JSON.

NOTE(review): machine-obfuscated chunk. Both top-level functions and the CLI
helper are all named ``a__`` (later defs shadow earlier ones), signatures
repeat the parameter name ``snake_case`` (a SyntaxError), locals collapse to
``__SCREAMING_SNAKE_CASE`` / ``lowercase_``, and bodies reference original,
now-unbound names (``fp``, ``targets``, ``buffer``, ``selected_warnings``,
``paths``, ``artifact_path``, ``parser``, ``args``, ``from_gh``,
``artifacts``, ``extract_warnings``, ``list_str``). Restore before running.
"""
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


lowercase_ = logging.get_logger(__name__)


def a__ ( snake_case , snake_case ):
    """Extract warnings matching `targets` from a single artifact (zip or dir)."""
    __SCREAMING_SNAKE_CASE : Optional[int] = set()
    __SCREAMING_SNAKE_CASE : str = []

    def parse_line(snake_case ):
        # Accumulate indented continuation lines into `buffer`; flush one
        # complete warning whenever a non-indented line is reached.
        for line in fp:
            if isinstance(snake_case , snake_case ):
                __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' ''' ):
                # process a single warning and move it to `selected_warnings`.
                if len(snake_case ) > 0:
                    __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case )
                    # Only keep the warnings specified in `targets`
                    if any(F''': {x}: ''' in warning for x in targets ):
                        selected_warnings.add(snake_case )
                    buffer.clear()
                continue
            else:
                __SCREAMING_SNAKE_CASE : int = line.strip()
                buffer.append(snake_case )

    if from_gh:
        # Artifacts were already unpacked by actions/download-artifact.
        for filename in os.listdir(snake_case ):
            __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case )
            if not os.path.isdir(snake_case ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(snake_case ) as fp:
                    parse_line(snake_case )
    else:
        try:
            with zipfile.ZipFile(snake_case ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(snake_case ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(snake_case ) as fp:
                            parse_line(snake_case )
        except Exception:
            logger.warning(
                F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )

    return selected_warnings


def a__ ( snake_case , snake_case ):
    """Union the selected warnings over all artifacts in `artifact_dir`."""
    __SCREAMING_SNAKE_CASE : List[str] = set()
    __SCREAMING_SNAKE_CASE : List[Any] = [
        os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)
    ]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) )

    return selected_warnings


if __name__ == "__main__":

    def a__ ( snake_case ):
        # argparse `type=` helper: split a comma-separated string into a list.
        return values.split(''',''' )

    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    parser.add_argument(
        """--output_dir""",
        type=str,
        required=True,
        help="""Where to store the downloaded artifacts and other result files.""",
    )
    parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    # optional parameters
    parser.add_argument(
        """--targets""",
        default="""DeprecationWarning,UserWarning,FutureWarning""",
        type=list_str,
        help="""Comma-separated list of target warning(s) which we want to extract.""",
    )
    parser.add_argument(
        """--from_gh""",
        action="""store_true""",
        help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
    )

    lowercase_ = parser.parse_args()
    lowercase_ = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("""=""" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    lowercase_ = extract_warnings(args.output_dir, args.targets)

    lowercase_ = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
74
0
'''Pytest suite for `datasets.utils.file_utils` (cached_path, http/ftp/fsspec
helpers, and offline-mode behavior).

NOTE(review): machine-obfuscated chunk. Every test function and fixture is
named ``_SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones — pytest
would only collect the last), several signatures repeat the parameter name
``__snake_case`` (a SyntaxError), and locals collapse to ``_A``. Original
fixture and test names must be restored before this suite can run.
'''
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


# Shared fixture content: a two-line text payload and its file name.
_UpperCAmelCase : Optional[int] = '''\
Text data.
Second line of data.'''

_UpperCAmelCase : Optional[Any] = '''file'''


@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( __snake_case : Any ):
    """Session fixture: write the payload zstd-compressed and return its path."""
    _A = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
    _A = bytes(__snake_case , 'utf-8' )
    with zstd.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] ):
    """Fixture: write the payload into the mock fsspec filesystem."""
    with open(os.path.join(tmpfs.local_root_dir , __snake_case ) , 'w' ) as f:
        f.write(__snake_case )
    return FILE_PATH


@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : List[str] ):
    """cached_path with extract_compressed_file=True decompresses each format."""
    _A = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    _A = input_paths[compression_format]
    _A = tmp_path / 'cache'
    _A = DownloadConfig(cache_dir=__snake_case , extract_compressed_file=__snake_case )
    _A = cached_path(__snake_case , download_config=__snake_case )
    with open(__snake_case ) as f:
        _A = f.read()
    with open(__snake_case ) as f:
        _A = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : List[str] ):
    """The extraction directory honors both default and custom cache settings."""
    _A = 'custom_cache'
    _A = 'custom_extracted_dir'
    _A = tmp_path / 'custom_extracted_path'
    if default_extracted:
        _A = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , __snake_case )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__snake_case ) )
        _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    _A = xz_file
    _A = (
        DownloadConfig(extract_compressed_file=__snake_case )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__snake_case )
    )
    _A = cached_path(__snake_case , download_config=__snake_case )
    assert Path(__snake_case ).parent.parts[-2:] == expected


def _SCREAMING_SNAKE_CASE ( __snake_case : str ):
    """Local files resolve to themselves, by absolute or relative path."""
    # absolute path
    _A = str(Path(__snake_case ).resolve() )
    assert cached_path(__snake_case ) == text_file
    # relative path
    _A = str(Path(__snake_case ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(__snake_case ) == text_file


def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
    """Missing local files raise, by absolute or relative path."""
    # absolute path
    _A = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(__snake_case ):
        cached_path(__snake_case )
    # relative path
    _A = './__missing_file__.txt'
    with pytest.raises(__snake_case ):
        cached_path(__snake_case )


def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
    """get_from_cache can read from the mock fsspec filesystem."""
    _A = get_from_cache(F'tmp://{tmpfs_file}' )
    with open(__snake_case ) as f:
        _A = f.read()
    assert output_file_content == FILE_CONTENT


@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _SCREAMING_SNAKE_CASE ( ):
    """cached_path raises OfflineModeIsEnabled when offline mode is on."""
    with pytest.raises(__snake_case ):
        cached_path('https://huggingface.co' )


@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : Tuple ):
    """HTTP helpers raise in offline mode."""
    _A = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(__snake_case ):
        http_get('https://huggingface.co' , temp_file=__snake_case )
    with pytest.raises(__snake_case ):
        http_head('https://huggingface.co' )


@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ):
    """FTP helpers raise in offline mode."""
    _A = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(__snake_case ):
        ftp_get('ftp://huggingface.co' , temp_file=__snake_case )
    with pytest.raises(__snake_case ):
        ftp_head('ftp://huggingface.co' )


@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
    """fsspec helpers raise in offline mode."""
    _A = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(__snake_case ):
        fsspec_get('s3://huggingface.co' , temp_file=__snake_case )
    with pytest.raises(__snake_case ):
        fsspec_head('s3://huggingface.co' )
107
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


# NOTE(review): identifiers in this file look machine-mangled: both classes are
# named `__UpperCamelCase` (the second shadows the first), the base classes are
# the undefined `lowerCAmelCase__`, and every `__init__` parameter is named
# `_A` (duplicate parameter names are a SyntaxError).  Presumably this was
# diffusers' `TransformerTemporalModelOutput` / `TransformerTemporalModel`
# (the return statement below still says so) — confirm against upstream before
# relying on any of it.
@dataclass
class __UpperCamelCase ( lowerCAmelCase__ ):
    """Output container of the temporal transformer: a single `sample` tensor field."""

    # presumably `sample: torch.FloatTensor` before mangling — TODO confirm
    lowerCAmelCase_ = 42


class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    """Transformer applied along the temporal (frame) axis of a video latent batch."""

    @register_to_config
    def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ):
        """Build the norm -> proj_in -> transformer blocks -> proj_out stack."""
        super().__init__()
        # NOTE(review): these assignments bind a throwaway local instead of
        # `self.<attr>` — presumably `self.num_attention_heads = ...` etc.
        # before mangling.
        __SCREAMING_SNAKE_CASE : Dict = num_attention_heads
        __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim
        # inner width of the attention stack: heads * head_dim
        __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim
        __SCREAMING_SNAKE_CASE : Tuple = in_channels
        # channel-wise group norm, then a linear projection into the attention width
        __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A )
        __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A )

        # 3. Define transformers blocks
        __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , )
                for d in range(_A )
            ] )

        # project back from the attention width to the channel count
        __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A )

    def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ):
        """Run attention over frames for every spatial location, then restore the
        input layout and add a residual connection.

        Assumes the input is laid out as (batch*frames, channels, height, width)
        — TODO confirm against the upstream forward() signature.
        """
        # unpack (batch_frames, channel, height, width); batch = batch_frames // num_frames
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape
        __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames
        # keep the untouched input for the residual connection at the end
        __SCREAMING_SNAKE_CASE : Dict = hidden_states
        # (batch, frames, C, H, W) -> (batch, C, frames, H, W)
        __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A )
        __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A )
        # flatten the spatial dims so each pixel becomes one sequence of `frames` tokens
        __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A )
        __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A )

        # 2. Blocks
        for block in self.transformer_blocks:
            __SCREAMING_SNAKE_CASE : Optional[Any] = block(
                _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , )

        # 3. Output
        __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A )
        # undo the flattening/permutation back to (batch*frames, C, H, W)
        __SCREAMING_SNAKE_CASE : List[str] = (
            hidden_states[None, None, :]
            .reshape(_A , _A , _A , _A , _A )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A )
        # residual connection with the original input
        __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=_A )
74
0
import pytest

import datasets

# Import fixture modules as plugins
__a: list = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']


# NOTE(review): every function below is named `_SCREAMING_SNAKE_CASE`, so each
# later `def` shadows the previous one — presumably these were the pytest hooks
# `pytest_collection_modifyitems` / `pytest_configure` and named fixtures
# before machine-mangling; confirm against the upstream conftest.  Likewise
# `autouse=__snake_case` (and the monkeypatched values) reference a name that
# is undefined at module level — originally literal values such as `True`/`False`.
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> None:
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
            continue
        item.add_marker(pytest.mark.unit )


def _SCREAMING_SNAKE_CASE ( __snake_case ) -> None:
    # Register the custom marker so pytest does not warn about unknown markers.
    config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )


@pytest.fixture(autouse=__snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> None:
    # Redirect every datasets cache directory into a per-session temp dir so
    # tests never read from (or pollute) the user's real HF cache.
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
    _UpperCAmelCase = tmp_path_factory.getbasetemp() / """cache"""
    _UpperCAmelCase = test_hf_cache_home / """datasets"""
    _UpperCAmelCase = test_hf_cache_home / """metrics"""
    _UpperCAmelCase = test_hf_cache_home / """modules"""
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(__snake_case ) )
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(__snake_case ) )
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(__snake_case ) )
    # downloads (and their extracted versions) also live under the temp cache
    _UpperCAmelCase = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(__snake_case ) )
    _UpperCAmelCase = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(__snake_case ) )


@pytest.fixture(autouse=__snake_case , scope="""session""" )
def _SCREAMING_SNAKE_CASE ( ) -> None:
    # Keep test output clean: progress bars are noise under pytest.
    datasets.disable_progress_bar()


@pytest.fixture(autouse=__snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> None:
    # don't take tests into account when counting downloads
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , __snake_case )


@pytest.fixture
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> None:
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , __snake_case )
108
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): module-level constants and every function were renamed by a
# machine transform — all constants are `lowercase_` (each assignment shadows
# the previous one) and all functions are `a__`.  The bodies still reference
# the original names (`DIFFUSERS_PATH`, `spec`, `_should_continue`,
# `find_code_in_diffusers`, `get_indent`, `blackify`, `is_copy_consistent`,
# `check_copies`, `_re_copy_warning`, ...) which are therefore undefined here.
# Confirm against the upstream `utils/check_copies.py` before running.
lowercase_ = """src/diffusers"""
lowercase_ = """."""

# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
    """diffusers""",
    os.path.join(DIFFUSERS_PATH, """__init__.py"""),
    submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()


def a__ ( snake_case , snake_case ):
    """Return True while `line` is still part of the object started at `indent`
    (original `_should_continue`): same-or-deeper indent, blank line, or a
    closing-paren/annotation line."""
    return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None


def a__ ( snake_case ):
    """Locate a dotted `object_name` inside the diffusers source tree and return
    its source code as one string (original `find_code_in_diffusers`)."""
    __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' )
    __SCREAMING_SNAKE_CASE : str = 0

    # First let's find the module where our object lives.
    __SCREAMING_SNAKE_CASE : Any = parts[i]
    while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ):
        i += 1
        if i < len(snake_case ):
            __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] )
    if i >= len(snake_case ):
        raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )

    with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        __SCREAMING_SNAKE_CASE : Dict = f.readlines()

    # Now let's find the class / func in the code!
    __SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
    __SCREAMING_SNAKE_CASE : Union[str, Any] = 0
    # Descend through nested names (e.g. Class.method), increasing the expected
    # indent one level per name.
    for name in parts[i + 1 :]:
        while (
            line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
        ):
            line_index += 1
        # NOTE(review): the literal below was presumably four spaces (one
        # indent level) before whitespace mangling — TODO confirm.
        indent += " "
        line_index += 1

    if line_index >= len(snake_case ):
        raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    __SCREAMING_SNAKE_CASE : List[Any] = line_index
    while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1

    __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index]
    return "".join(snake_case )


# Matches `# Copied from diffusers.<path>` markers, the `x->y` replace patterns
# attached to them, and `<FILL ...>` placeholders respectively.
lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase_ = re.compile(R"""<FILL\s+[^>]*>""")


def a__ ( snake_case ):
    """Return the leading indentation of the first non-empty line of `code`
    (original `get_indent`)."""
    __SCREAMING_SNAKE_CASE : int = code.split('''\n''' )
    __SCREAMING_SNAKE_CASE : Dict = 0
    while idx < len(snake_case ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(snake_case ):
        return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
    return ""


def a__ ( snake_case ):
    """Format `code` with black, temporarily wrapping indented code in a dummy
    class so black accepts it (original `blackify`)."""
    __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0
    if has_indent:
        __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}'''
    # NOTE(review): `black.TargetVersion.PYaa` does not exist — the digits were
    # mangled (presumably `PY37`); this line raises AttributeError as written.
    __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case )
    __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case )
    __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case )
    return result[len('''class Bla:\n''' ) :] if has_indent else result


def a__ ( snake_case , snake_case=False ):
    """Check every `# Copied from` block in one file against its source of truth;
    return the list of mismatches and optionally rewrite them in place
    (original `is_copy_consistent`)."""
    with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        __SCREAMING_SNAKE_CASE : List[str] = f.readlines()
    __SCREAMING_SNAKE_CASE : Optional[Any] = []
    __SCREAMING_SNAKE_CASE : int = 0

    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(snake_case ):
        __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups()
        __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case )
        __SCREAMING_SNAKE_CASE : str = get_indent(snake_case )

        # The observed code starts one line below the comment (two if the
        # indents differ, to skip the def/class line itself).
        __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2
        __SCREAMING_SNAKE_CASE : Dict = theoretical_indent
        __SCREAMING_SNAKE_CASE : Optional[int] = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        __SCREAMING_SNAKE_CASE : List[Any] = True
        while line_index < len(snake_case ) and should_continue:
            line_index += 1
            if line_index >= len(snake_case ):
                break
            __SCREAMING_SNAKE_CASE : Any = lines[line_index]
            __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1

        __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index]
        __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case )

        # Remove any nested `Copied from` comments to avoid circular copies
        __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None]
        __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case )

        # Before comparing, use the `replace_pattern` on the original code.
        if len(snake_case ) > 0:
            __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
            __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups()
                __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case )
                # `all-casing` also applies the replacement in lower/upper case
                if option.strip() == "all-casing":
                    __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case )
                    __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case )

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code )
        __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
                __SCREAMING_SNAKE_CASE : str = start_index + 1

    if overwrite and len(snake_case ) > 0:
        # Warn the user a file has been modified.
        # NOTE(review): "(unknown)" below was presumably an f-string filename
        # placeholder before mangling — TODO confirm.
        print(F'''Detected changes, rewriting (unknown).''' )
        with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
            f.writelines(snake_case )
    return diffs


def a__ ( snake_case = False ):
    """Run the copy-consistency check over every Python file in the repo and
    raise with a summary when inconsistencies are found (original `check_copies`)."""
    __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case )
    __SCREAMING_SNAKE_CASE : Tuple = []
    for filename in all_files:
        __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case )
        diffs += [F'''- (unknown): copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(snake_case ) > 0:
        __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case )
        raise Exception(
            '''Found the following copy inconsistencies:\n'''
            + diff
            + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    lowercase_ = parser.parse_args()
    check_copies(args.fix_and_overwrite)
74
0
'''Configuration (and ONNX export configs) for vision-encoder-decoder models.

NOTE(review): identifiers were machine-mangled — all four classes are named
`__a` (each shadows the previous), class attributes are all `__UpperCamelCase`
(the second assignment overwrites the first; presumably `model_type` and
`is_composition` originally), and attribute assignments bind the throwaway
`__SCREAMING_SNAKE_CASE` instead of `self.<attr>` / the original local names.
Confirm against the upstream transformers source.
'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig

if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

a = logging.get_logger(__name__)


class __a ( _snake_case ):
    # presumably `model_type` / `is_composition` before mangling — TODO confirm
    __UpperCamelCase : Optional[int] = 'vision-encoder-decoder'
    __UpperCamelCase : Optional[int] = True

    def __init__( self : List[Any] ,**lowerCamelCase : List[Any] ):
        '''Build a composite config from `encoder` and `decoder` sub-config kwargs.'''
        super().__init__(**lowerCamelCase )
        # both sub-configs are mandatory; fail fast with the offending kwargs
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuraton of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        __SCREAMING_SNAKE_CASE = kwargs.pop("""encoder""" )
        __SCREAMING_SNAKE_CASE = encoder_config.pop("""model_type""" )
        __SCREAMING_SNAKE_CASE = kwargs.pop("""decoder""" )
        __SCREAMING_SNAKE_CASE = decoder_config.pop("""model_type""" )
        # instantiate the concrete sub-config classes from their model types
        __SCREAMING_SNAKE_CASE = AutoConfig.for_model(lowerCamelCase ,**lowerCamelCase )
        __SCREAMING_SNAKE_CASE = AutoConfig.for_model(lowerCamelCase ,**lowerCamelCase )
        __SCREAMING_SNAKE_CASE = True

    @classmethod
    def UpperCAmelCase__ ( cls : Tuple ,lowerCamelCase : PretrainedConfig ,lowerCamelCase : PretrainedConfig ,**lowerCamelCase : Optional[int] ):
        '''Build a composite config from two already-instantiated sub-configs,
        forcing the decoder into cross-attention decoder mode.'''
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**lowerCamelCase )

    def UpperCAmelCase__ ( self : Dict ):
        '''Serialize to a plain dict, expanding the nested sub-configs.'''
        __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
        __SCREAMING_SNAKE_CASE = self.encoder.to_dict()
        __SCREAMING_SNAKE_CASE = self.decoder.to_dict()
        __SCREAMING_SNAKE_CASE = self.__class__.model_type
        return output


class __a ( _snake_case ):
    '''ONNX export config for the vision encoder half.'''

    # minimum torch ONNX opset support — presumably `torch_onnx_minimum_version`
    __UpperCamelCase : Dict = version.parse('1.11' )

    @property
    def UpperCAmelCase__ ( self : Tuple ):
        '''Encoder input axes: a single image tensor.'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def UpperCAmelCase__ ( self : List[str] ):
        '''Absolute tolerance used when validating the exported model.'''
        return 1E-4

    @property
    def UpperCAmelCase__ ( self : List[str] ):
        '''Encoder output axes.'''
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )


class __a ( _snake_case ):
    '''ONNX export config for the text decoder half.'''

    @property
    def UpperCAmelCase__ ( self : Tuple ):
        '''Decoder input axes: ids, attention mask and encoder hidden states.'''
        __SCREAMING_SNAKE_CASE = OrderedDict()
        __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs

    def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : "PreTrainedTokenizerBase" ,lowerCamelCase : int = -1 ,lowerCamelCase : int = -1 ,lowerCamelCase : bool = False ,lowerCamelCase : Optional["TensorType"] = None ,):
        '''Produce dummy decoder inputs, replacing real encoder outputs with zeros.'''
        import torch

        __SCREAMING_SNAKE_CASE = OrderedDict()
        __SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(
            lowerCamelCase ,batch_size=lowerCamelCase ,seq_length=lowerCamelCase ,is_pair=lowerCamelCase ,framework=lowerCamelCase )
        # shape of the fake encoder hidden states fed to the decoder
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dummy_input["""input_ids"""].shape
        __SCREAMING_SNAKE_CASE = (batch, encoder_sequence, self._config.encoder_hidden_size)
        __SCREAMING_SNAKE_CASE = dummy_input.pop("""input_ids""" )
        __SCREAMING_SNAKE_CASE = dummy_input.pop("""attention_mask""" )
        __SCREAMING_SNAKE_CASE = torch.zeros(lowerCamelCase )
        return common_inputs


class __a ( _snake_case ):
    '''Composite ONNX config that dispatches to the encoder/decoder configs above.'''

    @property
    def UpperCAmelCase__ ( self : Any ):
        # intentionally abstract here; concrete inputs come from the sub-configs
        pass

    def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : PretrainedConfig ):
        '''Return the ONNX config for the encoder half.'''
        return VisionEncoderDecoderEncoderOnnxConfig(lowerCamelCase )

    def UpperCAmelCase__ ( self : Any ,lowerCamelCase : PretrainedConfig ,lowerCamelCase : PretrainedConfig ,lowerCamelCase : str = "default" ):
        '''Return the ONNX config for the decoder half, wiring in the encoder width.'''
        __SCREAMING_SNAKE_CASE = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(lowerCamelCase ,lowerCamelCase )
109
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


# NOTE(review): method names are machine-mangled — all three are
# `UpperCAmelCase__`, so the later defs shadow the earlier ones and unittest
# would only see the last; presumably `tearDown` plus two `test_*` methods
# before mangling.  Tuple unpacks also repeatedly bind the same throwaway name.
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests: run Stable Diffusion 2 under Flax/JAX and compare
    a small output slice against recorded reference values."""

    def UpperCAmelCase__ ( self : List[Any] ):
        """Free accelerator memory between tests."""
        super().tearDown()
        gc.collect()

    def UpperCAmelCase__ ( self : Any ):
        """SD2 with the default scheduler in bfloat16, sharded across devices."""
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
        __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger'''
        # one sample per available device
        __SCREAMING_SNAKE_CASE : int = jax.device_count()
        __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt]
        __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A )

        # replicate params and shard inputs for pmap execution
        __SCREAMING_SNAKE_CASE : Tuple = replicate(_A )
        __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A )

        # fixed seed, one RNG key per device, for reproducible reference values
        __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 )
        __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() )

        __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        # compare a small corner slice against recorded reference pixels
        __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1]

        __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def UpperCAmelCase__ ( self : Tuple ):
        """Same pipeline but with the DPMSolver multistep scheduler swapped in."""
        __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2'''
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' )
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained(
            _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , )
        __SCREAMING_SNAKE_CASE : List[str] = scheduler_params

        __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger'''
        __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count()
        __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt]
        __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A )

        # replicate params and shard inputs for pmap execution
        __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A )
        __SCREAMING_SNAKE_CASE : List[str] = shard(_A )

        __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() )

        __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1]

        __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
74
0
"""simple docstring""" from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class a : pass
110
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import table: submodule name -> public names it provides.
# BUG FIX: the previous version rebound one flat name for every export group
# and then passed an undefined `_import_structure` to `_LazyModule`, raising
# NameError at import time.  Build the dict incrementally instead, matching
# the other model `__init__` files in this repo.
lowercase_ = {
    """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
    """processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
    """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
_import_structure = lowercase_

# Fast tokenizer is only available when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_layoutlmv2_fast"""] = ["""LayoutLMv2TokenizerFast"""]

# Image pre-processing needs vision support (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_layoutlmv2"""] = ["""LayoutLMv2FeatureExtractor"""]
    _import_structure["""image_processing_layoutlmv2"""] = ["""LayoutLMv2ImageProcessor"""]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_layoutlmv2"""] = [
        """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LayoutLMv2ForQuestionAnswering""",
        """LayoutLMv2ForSequenceClassification""",
        """LayoutLMv2ForTokenClassification""",
        """LayoutLMv2Layer""",
        """LayoutLMv2Model""",
        """LayoutLMv2PreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static-analysis-only imports mirroring the lazy table above.
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    # At runtime, expose a lazy module that imports submodules on attribute access.
    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
0
"""OwlViT model configurations (text, vision, combined) and ONNX export config.

NOTE(review): this file was machine-mangled — all four classes are named
`UpperCamelCase` (each shadows the previous; presumably `OwlViTTextConfig`,
`OwlViTVisionConfig`, `OwlViTConfig`, `OwlViTOnnxConfig`), class attributes are
all `A__` (presumably `model_type` / `is_composition`), parameters are all
`snake_case__` (duplicate parameter names are a SyntaxError), and assignments
bind the throwaway `_SCREAMING_SNAKE_CASE` where the originals wrote
`self.<attr> = ...` or tuple-unpacked `config_dict, kwargs = ...`.  Confirm
every body against the upstream transformers source before relying on it.
"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union

if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

lowercase_ : str = logging.get_logger(__name__)

# canonical checkpoints -> hosted config.json
lowercase_ : Optional[Any] = {
    '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
    '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
    '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}


class UpperCamelCase ( lowerCAmelCase__ ):
    """Text-tower configuration (CLIP-style transformer)."""

    A__ = """owlvit_text_model"""

    def __init__( self , snake_case__=49408 , snake_case__=512 , snake_case__=2048 , snake_case__=12 , snake_case__=8 , snake_case__=16 , snake_case__="quick_gelu" , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1.0 , snake_case__=0 , snake_case__=49406 , snake_case__=49407 , **snake_case__ , ):
        """Store text-tower hyperparameters (vocab/hidden sizes, depth, heads, ...)."""
        super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
        # presumably `self.vocab_size = vocab_size` etc. before mangling
        _SCREAMING_SNAKE_CASE : Any = vocab_size
        _SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
        _SCREAMING_SNAKE_CASE : Dict = intermediate_size
        _SCREAMING_SNAKE_CASE : int = num_hidden_layers
        _SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
        _SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
        _SCREAMING_SNAKE_CASE : int = hidden_act
        _SCREAMING_SNAKE_CASE : int = layer_norm_eps
        _SCREAMING_SNAKE_CASE : Tuple = attention_dropout
        _SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
        _SCREAMING_SNAKE_CASE : Tuple = initializer_factor

    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls , snake_case__ , **snake_case__ ):
        """Load this config from a pretrained checkpoint, unwrapping the nested
        `text_config` when the checkpoint is a combined OwlViT config."""
        cls._set_token_in_kwargs(_A )
        # presumably `config_dict, kwargs = cls.get_config_dict(...)` before mangling
        _SCREAMING_SNAKE_CASE : List[str] = cls.get_config_dict(_A , **_A )

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            _SCREAMING_SNAKE_CASE : Optional[int] = config_dict['''text_config''']

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(_A , **_A )


class UpperCamelCase ( lowerCAmelCase__ ):
    """Vision-tower configuration (ViT-style transformer)."""

    A__ = """owlvit_vision_model"""

    def __init__( self , snake_case__=768 , snake_case__=3072 , snake_case__=12 , snake_case__=12 , snake_case__=3 , snake_case__=768 , snake_case__=32 , snake_case__="quick_gelu" , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1.0 , **snake_case__ , ):
        """Store vision-tower hyperparameters (hidden size, depth, patch size, ...)."""
        super().__init__(**_A )
        _SCREAMING_SNAKE_CASE : int = hidden_size
        _SCREAMING_SNAKE_CASE : Tuple = intermediate_size
        _SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
        _SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
        _SCREAMING_SNAKE_CASE : Optional[int] = num_channels
        _SCREAMING_SNAKE_CASE : List[str] = image_size
        _SCREAMING_SNAKE_CASE : Dict = patch_size
        _SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
        _SCREAMING_SNAKE_CASE : Any = layer_norm_eps
        _SCREAMING_SNAKE_CASE : List[str] = attention_dropout
        _SCREAMING_SNAKE_CASE : Dict = initializer_range
        _SCREAMING_SNAKE_CASE : Optional[Any] = initializer_factor

    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls , snake_case__ , **snake_case__ ):
        """Load this config from a pretrained checkpoint, unwrapping the nested
        `vision_config` when the checkpoint is a combined OwlViT config."""
        cls._set_token_in_kwargs(_A )
        # presumably `config_dict, kwargs = cls.get_config_dict(...)` before mangling
        _SCREAMING_SNAKE_CASE : int = cls.get_config_dict(_A , **_A )

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            _SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(_A , **_A )


class UpperCamelCase ( lowerCAmelCase__ ):
    """Combined OwlViT configuration holding a text and a vision sub-config."""

    A__ = """owlvit"""
    A__ = True

    def __init__( self , snake_case__=None , snake_case__=None , snake_case__=512 , snake_case__=2.6_592 , snake_case__=True , **snake_case__ , ):
        """Build the combined config; missing sub-configs fall back to defaults."""
        super().__init__(**_A )

        if text_config is None:
            _SCREAMING_SNAKE_CASE : Optional[Any] = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )

        if vision_config is None:
            _SCREAMING_SNAKE_CASE : Union[str, Any] = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )

        _SCREAMING_SNAKE_CASE : Any = OwlViTTextConfig(**_A )
        _SCREAMING_SNAKE_CASE : Optional[Any] = OwlViTVisionConfig(**_A )
        _SCREAMING_SNAKE_CASE : Tuple = projection_dim
        _SCREAMING_SNAKE_CASE : List[str] = logit_scale_init_value
        _SCREAMING_SNAKE_CASE : Any = return_dict
        _SCREAMING_SNAKE_CASE : Optional[Any] = 1.0

    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls , snake_case__ , **snake_case__ ):
        """Load the combined config from a pretrained checkpoint."""
        cls._set_token_in_kwargs(_A )
        # presumably `config_dict, kwargs = cls.get_config_dict(...)` before mangling
        _SCREAMING_SNAKE_CASE : Tuple = cls.get_config_dict(_A , **_A )

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(_A , **_A )

    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls , snake_case__ , snake_case__ , **snake_case__ ):
        """Build the combined config from two sub-config dicts (presumably the
        dict was populated with `text_config`/`vision_config` keys before mangling)."""
        _SCREAMING_SNAKE_CASE : str = {}
        _SCREAMING_SNAKE_CASE : Union[str, Any] = text_config
        _SCREAMING_SNAKE_CASE : Optional[int] = vision_config

        return cls.from_dict(_A , **_A )

    def __SCREAMING_SNAKE_CASE ( self ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        _SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(self.__dict__ )
        _SCREAMING_SNAKE_CASE : str = self.text_config.to_dict()
        _SCREAMING_SNAKE_CASE : Optional[int] = self.vision_config.to_dict()
        _SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
        return output


class UpperCamelCase ( lowerCAmelCase__ ):
    """ONNX export configuration for the combined OwlViT model."""

    @property
    def __SCREAMING_SNAKE_CASE ( self ):
        """Input axes for the exported graph (token ids, image, attention mask)."""
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ] )

    @property
    def __SCREAMING_SNAKE_CASE ( self ):
        """Output axes for the exported graph."""
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ] )

    @property
    def __SCREAMING_SNAKE_CASE ( self ):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4

    def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = None , ):
        """Generate dummy text + image inputs via the processor's two sub-components."""
        _SCREAMING_SNAKE_CASE : Dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=_A , seq_length=_A , framework=_A )
        _SCREAMING_SNAKE_CASE : Optional[int] = super().generate_dummy_inputs(
            processor.image_processor , batch_size=_A , framework=_A )
        return {**text_input_dict, **image_input_dict}

    @property
    def __SCREAMING_SNAKE_CASE ( self ):
        """Minimum ONNX opset required by the export."""
        return 14
572
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
    """Test-suite for the MobileBERT slow/fast tokenizers.

    NOTE(review): identifiers in this file were machine-mangled. Every test
    method shares the name ``UpperCAmelCase__`` so, at class-definition time,
    each later ``def`` overwrites the previous one; likewise most assignments
    bind to ``__SCREAMING_SNAKE_CASE`` while the following statements read
    names (``vocab_tokens``, ``tokenizer``, ...) that are never bound here.
    The code is retained verbatim; only comments/docstrings are added.
    """

    # Mangled class attributes — presumably tokenizer_class, rust_tokenizer_class,
    # test_rust_tokenizer, space_between_special_tokens, from_pretrained_filter,
    # pre_trained_model_path in the original (TODO confirm against upstream).
    lowerCAmelCase_ = MobileBertTokenizer
    lowerCAmelCase_ = MobileBertTokenizerFast
    lowerCAmelCase_ = True
    lowerCAmelCase_ = True
    lowerCAmelCase_ = filter_non_english
    lowerCAmelCase_ = '''google/mobilebert-uncased'''

    def UpperCAmelCase__ ( self : Dict ):
        """Write a tiny WordPiece vocab to the temp dir for the tests below."""
        super().setUp()

        __SCREAMING_SNAKE_CASE : List[str] = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        __SCREAMING_SNAKE_CASE : int = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])
            # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ):
        """Return an (input, expected-output) text pair for the common tests."""
        __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running'''
        __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running'''
        return input_text, output_text

    def UpperCAmelCase__ ( self : Optional[int] ):
        """Full tokenization against the tiny vocab: tokens and their ids."""
        __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file )

        __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )

    def UpperCAmelCase__ ( self : int ):
        """Slow vs. fast (Rust) tokenizer parity, with and without lower-casing."""
        if not self.test_rust_tokenizer:
            return

        __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
        __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()

        __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''

        __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
        __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A )
        self.assertListEqual(_A , _A )

        __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A )
        __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A )
        self.assertListEqual(_A , _A )

        __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
        __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A )
        __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A )
        self.assertListEqual(_A , _A )

        # With lower casing
        __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A )
        __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A )

        __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running'''

        __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
        __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A )
        self.assertListEqual(_A , _A )

        __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A )
        __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A )
        self.assertListEqual(_A , _A )

        __SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
        __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A )
        __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A )
        self.assertListEqual(_A , _A )

    def UpperCAmelCase__ ( self : Optional[int] ):
        """BasicTokenizer splits CJK characters into individual tokens."""
        __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """BasicTokenizer with lower-casing: case folded, accents folded."""
        __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A )

        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCAmelCase__ ( self : Tuple ):
        """Lower-casing with strip_accents disabled keeps diacritics."""
        __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )

        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def UpperCAmelCase__ ( self : List[str] ):
        """Lower-casing with strip_accents enabled removes diacritics."""
        __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )

        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCAmelCase__ ( self : Tuple ):
        """Default accent handling when only do_lower_case is given."""
        __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A )

        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCAmelCase__ ( self : Optional[int] ):
        """No lower-casing: original case is preserved."""
        __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A )

        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCAmelCase__ ( self : Any ):
        """No lower-casing, strip_accents disabled: text left untouched."""
        __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A )

        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCAmelCase__ ( self : Optional[int] ):
        """No lower-casing, strip_accents enabled: accents removed only."""
        __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )

        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Tokens listed in never_split are emitted unchanged."""
        __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] )

        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """WordpieceTokenizer: empty input, greedy subword split, OOV -> [UNK]."""
        __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']

        __SCREAMING_SNAKE_CASE : Dict = {}
        for i, token in enumerate(_A ):
            __SCREAMING_SNAKE_CASE : List[str] = i
        __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' )

        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )

        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )

        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def UpperCAmelCase__ ( self : List[str] ):
        """_is_whitespace classifies ASCII whitespace and NBSP, not letters."""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )

        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def UpperCAmelCase__ ( self : str ):
        """_is_control: true for control chars, false for printable/whitespace."""
        self.assertTrue(_is_control('''\u0005''' ) )

        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def UpperCAmelCase__ ( self : Any ):
        """_is_punctuation: punctuation symbols yes, letters/spaces no."""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )

        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def UpperCAmelCase__ ( self : Dict ):
        """Soft-hyphen-only input must tokenize to an empty list."""
        __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

        self.assertListEqual(
            [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    @slow
    def UpperCAmelCase__ ( self : List[str] ):
        """[CLS]/[SEP] (ids 101/102) placement for single and paired sequences."""
        __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )

        __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
        __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )

        __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A )
        __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def UpperCAmelCase__ ( self : Tuple ):
        """Offset mapping of the fast tokenizer, cased and uncased variants."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )

                __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus(
                    _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )

                __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False
                __SCREAMING_SNAKE_CASE : Optional[Any] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''Allen'''),
                        ((21, 23), '''##NL'''),
                        ((23, 24), '''##P'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''allen'''),
                        ((21, 23), '''##nl'''),
                        ((23, 24), '''##p'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def UpperCAmelCase__ ( self : str ):
        """Chinese characters and the tokenize_chinese_chars flag ("##" prefixing)."""
        __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有''']
        __SCREAMING_SNAKE_CASE : int = ''''''.join(_A )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __SCREAMING_SNAKE_CASE : str = True
                __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A )
                __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A )

                __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A )
                __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A )

                __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A )
                __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(_A , _A )
                self.assertListEqual(_A , _A )

                __SCREAMING_SNAKE_CASE : Optional[Any] = False
                __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A )

                __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
                __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A )

                __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A )
                __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )

                # it is expected that only the first Chinese character is not preceded by "##".
                __SCREAMING_SNAKE_CASE : List[Any] = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A )
                ]
                self.assertListEqual(_A , _A )
                self.assertListEqual(_A , _A )
74
0
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device


from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class lowercase ( lowerCAmelCase__ , unittest.TestCase ):
    """Fast (CPU-sized) tests for ShapEPipeline.

    NOTE(review): identifiers were machine-mangled — every property/method is
    named ``UpperCamelCase__`` (later defs overwrite earlier ones at class
    creation), assignments bind to ``UpperCAmelCase__`` while the following
    code reads other names, and ``get_dummy_inputs`` declares the duplicate
    parameter list ``(self , __a , __a=0 )`` which is a SyntaxError. Retained
    verbatim; only comments/docstrings are added.
    """

    # Mangled class attributes — presumably pipeline_class, params,
    # batch_params, required_optional_params, test_xformers_attention
    # in the original (TODO confirm against upstream).
    __SCREAMING_SNAKE_CASE = ShapEPipeline
    __SCREAMING_SNAKE_CASE = ["""prompt"""]
    __SCREAMING_SNAKE_CASE = ["""prompt"""]
    __SCREAMING_SNAKE_CASE = [
        """num_images_per_prompt""",
        """num_inference_steps""",
        """generator""",
        """latents""",
        """guidance_scale""",
        """frame_size""",
        """output_type""",
        """return_dict""",
    ]
    __SCREAMING_SNAKE_CASE = False

    @property
    def UpperCamelCase__ (self ) -> Optional[Any]:
        """Hidden size used for the tiny text embedder."""
        return 32

    @property
    def UpperCamelCase__ (self ) -> Any:
        """Time-embedding input dimension."""
        return 32

    @property
    def UpperCamelCase__ (self ) -> Union[str, Any]:
        """Time-embedding dimension, 4x the input dimension."""
        return self.time_input_dim * 4

    @property
    def UpperCamelCase__ (self ) -> List[str]:
        """Renderer hidden dimension."""
        return 8

    @property
    def UpperCamelCase__ (self ) -> Tuple:
        """Tiny CLIP tokenizer fixture (downloaded from the hub)."""
        UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def UpperCamelCase__ (self ) -> Dict:
        """Deterministically-seeded tiny CLIP text encoder."""
        torch.manual_seed(0 )
        UpperCAmelCase__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_A )

    @property
    def UpperCamelCase__ (self ) -> Any:
        """Deterministically-seeded tiny PriorTransformer."""
        torch.manual_seed(0 )

        UpperCAmelCase__ = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }

        UpperCAmelCase__ = PriorTransformer(**_A )
        return model

    @property
    def UpperCamelCase__ (self ) -> List[Any]:
        """Deterministically-seeded tiny ShapERenderer."""
        torch.manual_seed(0 )

        UpperCAmelCase__ = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }

        UpperCAmelCase__ = ShapERenderer(**_A )
        return model

    def UpperCamelCase__ (self ) -> Dict:
        """Assemble all dummy sub-models into a pipeline components dict."""
        UpperCAmelCase__ = self.dummy_prior
        UpperCAmelCase__ = self.dummy_text_encoder
        UpperCAmelCase__ = self.dummy_tokenizer
        UpperCAmelCase__ = self.dummy_renderer

        UpperCAmelCase__ = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
        UpperCAmelCase__ = {
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }

        return components

    def UpperCamelCase__ (self , __a , __a=0 ) -> Union[str, Any]:
        """Seeded generator + minimal call kwargs for the pipeline."""
        if str(_A ).startswith('mps' ):
            UpperCAmelCase__ = torch.manual_seed(_A )
        else:
            UpperCAmelCase__ = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase__ = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs

    def UpperCamelCase__ (self ) -> str:
        """End-to-end CPU run: output shape and a fixed slice of pixel values."""
        UpperCAmelCase__ = '''cpu'''

        UpperCAmelCase__ = self.get_dummy_components()

        UpperCAmelCase__ = self.pipeline_class(**_A )
        UpperCAmelCase__ = pipe.to(_A )

        pipe.set_progress_bar_config(disable=_A )

        UpperCAmelCase__ = pipe(**self.get_dummy_inputs(_A ) )
        UpperCAmelCase__ = output.images[0]
        UpperCAmelCase__ = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        UpperCAmelCase__ = np.array(
            [
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
            ] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCamelCase__ (self ) -> List[str]:
        """Batched inference matches unbatched for batch sizes 1 and 2."""
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def UpperCamelCase__ (self ) -> Tuple:
        """Single-sample identity under batching (relaxed on non-CPU)."""
        UpperCAmelCase__ = torch_device == '''cpu'''
        UpperCAmelCase__ = True

        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )

    def UpperCamelCase__ (self ) -> Any:
        """num_images_per_prompt multiplies the number of generated images."""
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**_A )
        UpperCAmelCase__ = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )

        UpperCAmelCase__ = 1
        UpperCAmelCase__ = 2

        UpperCAmelCase__ = self.get_dummy_inputs(_A )

        for key in inputs.keys():
            if key in self.batch_params:
                UpperCAmelCase__ = batch_size * [inputs[key]]

        UpperCAmelCase__ = pipe(**_A , num_images_per_prompt=_A )[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Slow GPU integration test against a reference rendering.

    NOTE(review): this class re-uses the mangled name ``lowercase`` and so
    shadows the fast-test class above at module level.
    """

    def UpperCamelCase__ (self ) -> int:
        # clean up GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase__ (self ) -> Any:
        """Full 'openai/shap-e' run compared to a stored reference output."""
        UpperCAmelCase__ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        UpperCAmelCase__ = ShapEPipeline.from_pretrained('openai/shap-e' )
        UpperCAmelCase__ = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )

        UpperCAmelCase__ = torch.Generator(device=_A ).manual_seed(0 )

        UpperCAmelCase__ = pipe(
            'a shark' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(_A , _A )
146
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , *_A : Optional[int] , **_A : Tuple ): """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' , _A , ) super().__init__(*_A , **_A )
74
0
"""UnCLIP-style DDPM scheduler module (identifiers machine-mangled).

NOTE(review): obfuscation artifacts are retained verbatim: the output
dataclass and the scheduler class are both named ``a_`` (the second shadows
the first), every scheduler method is named ``A__`` (later defs overwrite
earlier ones), ``__init__``/``step`` declare several parameters with the
same name ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), and assignments bind
``UpperCamelCase`` while later code reads other names. Only the comments
and docstrings below are added/changed.
"""

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class a_ ( lowerCAmelCase__ ):
    # Output container: previous sample and (optional) predicted x_0.
    lowercase = 42
    lowercase = None


def lowercase__ ( __UpperCamelCase , __UpperCamelCase=0.999 , __UpperCamelCase="cosine" , )-> Tuple:
    """Build the DDPM beta schedule from a cumulative alpha-bar function.

    Supports the "cosine" and "exp" alpha-bar transforms; each beta is
    1 - alpha_bar(t2)/alpha_bar(t1), clipped at ``max_beta`` (the 0.999
    default of the second parameter) to keep variances finite.
    Returns a float tensor of betas (``torch.floataa`` is mangled
    ``torch.float32``).
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(__UpperCamelCase ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(__UpperCamelCase ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )

    UpperCamelCase = []
    for i in range(__UpperCamelCase ):
        UpperCamelCase = i / num_diffusion_timesteps
        UpperCamelCase = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) , __UpperCamelCase ) )
    return torch.tensor(__UpperCamelCase , dtype=torch.floataa )


class a_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
    """UnCLIP scheduler: DDPM ancestral sampling restricted to the
    squaredcos_cap_v2 beta schedule, with UnCLIP's variance handling
    ("fixed_small_log" or "learned_range")."""

    @register_to_config
    def __init__( self , _SCREAMING_SNAKE_CASE = 1000 , _SCREAMING_SNAKE_CASE = "fixed_small_log" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = "epsilon" , _SCREAMING_SNAKE_CASE = "squaredcos_cap_v2" , ) -> List[str]:
        """Precompute betas/alphas/cumulative alphas for the fixed schedule."""
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'""" )

        UpperCamelCase = betas_for_alpha_bar(_A )

        UpperCamelCase = 1.0 - self.betas
        UpperCamelCase = torch.cumprod(self.alphas , dim=0 )
        UpperCamelCase = torch.tensor(1.0 )

        # standard deviation of the initial noise distribution
        UpperCamelCase = 1.0

        # setable values
        UpperCamelCase = None
        UpperCamelCase = torch.from_numpy(np.arange(0 , _A )[::-1].copy() )

        UpperCamelCase = variance_type

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Any:
        """Identity input scaling (kept for scheduler-API compatibility)."""
        return sample

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Optional[int]:
        """Select a decreasing, evenly-spaced subset of training timesteps.

        NOTE: unlike other schedulers, this spacing hits the final timestep
        exactly because it divides by (num_inference_steps - 1).
        """
        UpperCamelCase = num_inference_steps
        UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        UpperCamelCase = (np.arange(0 , _A ) * step_ratio).round()[::-1].copy().astype(np.intaa )
        UpperCamelCase = torch.from_numpy(_A ).to(_A )

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Any:
        """Posterior variance at timestep t (formulas (6)/(7) of DDPM paper)."""
        if prev_timestep is None:
            UpperCamelCase = t - 1

        UpperCamelCase = self.alphas_cumprod[t]
        UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        UpperCamelCase = 1 - alpha_prod_t
        UpperCamelCase = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            UpperCamelCase = self.betas[t]
        else:
            UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            UpperCamelCase = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            UpperCamelCase = torch.log(torch.clamp(_A , min=1e-20 ) )
            UpperCamelCase = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            UpperCamelCase = variance.log()
            UpperCamelCase = beta.log()

            UpperCamelCase = (predicted_variance + 1) / 2
            UpperCamelCase = frac * max_log + (1 - frac) * min_log

        return variance

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ) -> Optional[int]:
        """One reverse-diffusion step: predict x_0, form the DDPM posterior
        mean, and add the (possibly learned) variance noise for t > 0."""
        UpperCamelCase = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            # model predicts noise and per-channel variance jointly
            UpperCamelCase = torch.split(_A , sample.shape[1] , dim=1 )
        else:
            UpperCamelCase = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            UpperCamelCase = t - 1

        UpperCamelCase = self.alphas_cumprod[t]
        UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        UpperCamelCase = 1 - alpha_prod_t
        UpperCamelCase = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            UpperCamelCase = self.betas[t]
            UpperCamelCase = self.alphas[t]
        else:
            UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
            UpperCamelCase = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            UpperCamelCase = model_output
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                """ for the UnCLIPScheduler.""" )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            UpperCamelCase = torch.clamp(
                _A , -self.config.clip_sample_range , self.config.clip_sample_range )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        UpperCamelCase = 0
        if t > 0:
            UpperCamelCase = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=_A , device=model_output.device )

            UpperCamelCase = self._get_variance(
                _A , predicted_variance=_A , prev_timestep=_A , )

            if self.variance_type == "fixed_small_log":
                UpperCamelCase = variance
            elif self.variance_type == "learned_range":
                UpperCamelCase = (0.5 * variance).exp()
            else:
                raise ValueError(
                    F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    """ for the UnCLIPScheduler.""" )

            UpperCamelCase = variance * variance_noise

        UpperCamelCase = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=_A , pred_original_sample=_A )

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Any:
        """Forward-diffuse clean samples: sqrt(ᾱ_t)·x_0 + sqrt(1-ᾱ_t)·noise,
        broadcasting the per-timestep scalars to the sample rank."""
        UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        UpperCamelCase = timesteps.to(original_samples.device )

        UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
        UpperCamelCase = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 )

        UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
        UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )

        UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise

        return noisy_samples
301
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


lowercase_ = datasets.utils.logging.get_logger(__name__)


@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet loader.

    NOTE(review): field names were mangled to ``lowerCAmelCase_`` —
    presumably batch_size (10000), columns, and features in the original.
    """

    lowerCAmelCase_ = 1_00_00
    lowerCAmelCase_ = None
    lowerCAmelCase_ = None


class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that streams Parquet files in batches.

    NOTE(review): every method shares the mangled name ``UpperCAmelCase__``
    (later defs overwrite earlier ones at class creation) and assignments
    bind ``__SCREAMING_SNAKE_CASE`` while later code reads other names.
    Retained verbatim; only comments/docstrings are added.
    """

    lowerCAmelCase_ = ParquetConfig

    def UpperCAmelCase__ ( self : Any ):
        """Dataset info: features come straight from the config."""
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCAmelCase__ ( self : Any ):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(_A , (str, list, tuple) ):
            # flat data_files: everything goes into a single TRAIN split
            __SCREAMING_SNAKE_CASE : Tuple = data_files
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        __SCREAMING_SNAKE_CASE : int = []
        for split_name, files in data_files.items():
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE : Any = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
            # Infer features if they are stored in the arrow schema (first file only)
            if self.info.features is None:
                for file in itertools.chain.from_iterable(_A ):
                    with open(_A , '''rb''' ) as f:
                        __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
                    break
            splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
        return splits

    def UpperCAmelCase__ ( self : str , _A : pa.Table ):
        """Cast a raw Arrow table to the declared features, if any."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema )
        return pa_table

    def UpperCAmelCase__ ( self : Tuple , _A : str ):
        """Yield (key, table) pairs, reading each file in record batches."""
        __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # column projection must exactly match the declared features
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
            with open(_A , '''rb''' ) as f:
                __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A )
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' )
                    raise
74
0
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __lowercase ( lowerCAmelCase__ ): '''simple docstring''' _A : Union[str, Any] = 42 _A : Any = jnp.floataa _A : str = True def A_ ( self : Optional[Any] ): super().setup() UpperCamelCase__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self : Tuple , *_a : List[Any] , **_a : Optional[int] ): UpperCamelCase__ = super().__call__(*_A , **_A ) UpperCamelCase__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __lowercase ( lowerCAmelCase__ ): '''simple docstring''' _A : Tuple = FlaxBigBirdForNaturalQuestionsModule def lowerCamelCase_ ( UpperCamelCase__ : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Tuple, UpperCamelCase__ : str ): '''simple docstring''' def cross_entropy(UpperCamelCase__ : Tuple, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Dict=None ): UpperCamelCase__ = logits.shape[-1] UpperCamelCase__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype('''f4''' ) UpperCamelCase__ = jax.nn.log_softmax(UpperCamelCase__, axis=-1 ) UpperCamelCase__ = -jnp.sum(labels * logits, axis=-1 ) if reduction is not None: UpperCamelCase__ = reduction(UpperCamelCase__ ) return loss UpperCamelCase__ = partial(UpperCamelCase__, reduction=jnp.mean ) UpperCamelCase__ = cross_entropy(UpperCamelCase__, UpperCamelCase__ ) UpperCamelCase__ = cross_entropy(UpperCamelCase__, 
UpperCamelCase__ ) UpperCamelCase__ = cross_entropy(UpperCamelCase__, UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __lowercase : '''simple docstring''' _A : str = '''google/bigbird-roberta-base''' _A : List[Any] = 3000 _A : Any = 1_0500 _A : str = 128 _A : Union[str, Any] = 3 _A : List[Any] = 1 _A : Union[str, Any] = 5 # tx_args _A : Optional[int] = 3e-5 _A : Optional[int] = 0.0 _A : Dict = 2_0000 _A : str = 0.00_95 _A : Dict = '''bigbird-roberta-natural-questions''' _A : Optional[Any] = '''training-expt''' _A : int = '''data/nq-training.jsonl''' _A : str = '''data/nq-validation.jsonl''' def A_ ( self : Optional[int] ): os.makedirs(self.base_dir , exist_ok=_A ) UpperCamelCase__ = os.path.join(self.base_dir , self.save_dir ) UpperCamelCase__ = self.batch_size_per_device * jax.device_count() @dataclass class __lowercase : '''simple docstring''' _A : List[str] = 42 _A : Optional[Any] = 4096 # no dynamic padding on TPUs def __call__( self : List[str] , _a : int ): UpperCamelCase__ = self.collate_fn(_A ) UpperCamelCase__ = jax.tree_util.tree_map(_A , _A ) return batch def A_ ( self : List[Any] , _a : Optional[Any] ): UpperCamelCase__ = self.fetch_inputs(features['''input_ids'''] ) UpperCamelCase__ = { '''input_ids''': jnp.array(_A , dtype=jnp.intaa ), '''attention_mask''': jnp.array(_A , dtype=jnp.intaa ), '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ), '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ), '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ), } return batch def A_ ( self : Any , _a : list ): UpperCamelCase__ = [self._fetch_inputs(_A ) for ids in input_ids] return zip(*_A ) def A_ ( self : List[str] , _a : list ): UpperCamelCase__ = [1 for _ in range(len(_A ) )] while len(_A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def lowerCamelCase_ ( UpperCamelCase__ : Optional[int], 
UpperCamelCase__ : str, UpperCamelCase__ : Union[str, Any]=None ): '''simple docstring''' if seed is not None: UpperCamelCase__ = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): UpperCamelCase__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap, axis_name='''batch''' ) def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[Any], **UpperCamelCase__ : Any ): '''simple docstring''' def loss_fn(UpperCamelCase__ : List[Any] ): UpperCamelCase__ = model_inputs.pop('''start_labels''' ) UpperCamelCase__ = model_inputs.pop('''end_labels''' ) UpperCamelCase__ = model_inputs.pop('''pooled_labels''' ) UpperCamelCase__ = state.apply_fn(**UpperCamelCase__, params=UpperCamelCase__, dropout_rng=UpperCamelCase__, train=UpperCamelCase__ ) UpperCamelCase__ = outputs return state.loss_fn( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, ) UpperCamelCase__ = jax.random.split(UpperCamelCase__ ) UpperCamelCase__ = jax.value_and_grad(UpperCamelCase__ ) UpperCamelCase__ = grad_fn(state.params ) UpperCamelCase__ = jax.lax.pmean({'''loss''': loss}, axis_name='''batch''' ) UpperCamelCase__ = jax.lax.pmean(UpperCamelCase__, '''batch''' ) UpperCamelCase__ = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap, axis_name='''batch''' ) def lowerCamelCase_ ( UpperCamelCase__ : str, **UpperCamelCase__ : int ): '''simple docstring''' UpperCamelCase__ = model_inputs.pop('''start_labels''' ) UpperCamelCase__ = model_inputs.pop('''end_labels''' ) UpperCamelCase__ = model_inputs.pop('''pooled_labels''' ) UpperCamelCase__ = state.apply_fn(**UpperCamelCase__, params=state.params, train=UpperCamelCase__ ) UpperCamelCase__ = outputs UpperCamelCase__ = state.loss_fn(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) 
UpperCamelCase__ = jax.lax.pmean({'''loss''': loss}, axis_name='''batch''' ) return metrics class __lowercase ( train_state.TrainState ): '''simple docstring''' _A : int = struct.field(pytree_node=lowerCAmelCase__ ) @dataclass class __lowercase : '''simple docstring''' _A : str = 42 _A : Any = 42 _A : Optional[Any] = 42 _A : Union[str, Any] = 42 _A : Dict = 42 _A : int = 42 _A : Tuple = None def A_ ( self : int , _a : Optional[Any] , _a : str , _a : Optional[int] , _a : List[str]=None ): UpperCamelCase__ = model.params UpperCamelCase__ = TrainState.create( apply_fn=model.__call__ , params=_A , tx=_A , loss_fn=_A , ) if ckpt_dir is not None: UpperCamelCase__ = restore_checkpoint(_A , _A ) UpperCamelCase__ = { '''lr''': args.lr, '''init_lr''': args.init_lr, '''warmup_steps''': args.warmup_steps, '''num_train_steps''': num_train_steps, '''weight_decay''': args.weight_decay, } UpperCamelCase__ = build_tx(**_A ) UpperCamelCase__ = train_state.TrainState( step=_A , apply_fn=model.__call__ , params=_A , tx=_A , opt_state=_A , ) UpperCamelCase__ = args UpperCamelCase__ = data_collator UpperCamelCase__ = lr UpperCamelCase__ = params UpperCamelCase__ = jax_utils.replicate(_A ) return state def A_ ( self : Optional[int] , _a : List[str] , _a : int , _a : List[Any] ): UpperCamelCase__ = self.args UpperCamelCase__ = len(_A ) // args.batch_size UpperCamelCase__ = jax.random.PRNGKey(0 ) UpperCamelCase__ = jax.random.split(_A , jax.device_count() ) for epoch in range(args.max_epochs ): UpperCamelCase__ = jnp.array(0 , dtype=jnp.floataa ) UpperCamelCase__ = get_batched_dataset(_A , args.batch_size , seed=_A ) UpperCamelCase__ = 0 for batch in tqdm(_A , total=_A , desc=F"""Running EPOCH-{epoch}""" ): UpperCamelCase__ = self.data_collator(_A ) UpperCamelCase__ = self.train_step_fn(_A , _A , **_A ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 if i % args.logging_steps == 0: UpperCamelCase__ = jax_utils.unreplicate(state.step ) UpperCamelCase__ = 
running_loss.item() / i UpperCamelCase__ = self.scheduler_fn(state_step - 1 ) UpperCamelCase__ = self.evaluate(_A , _A ) UpperCamelCase__ = { '''step''': state_step.item(), '''eval_loss''': eval_loss.item(), '''tr_loss''': tr_loss, '''lr''': lr.item(), } tqdm.write(str(_A ) ) self.logger.log(_A , commit=_A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=_A ) def A_ ( self : int , _a : Any , _a : Optional[int] ): UpperCamelCase__ = get_batched_dataset(_A , self.args.batch_size ) UpperCamelCase__ = len(_A ) // self.args.batch_size UpperCamelCase__ = jnp.array(0 , dtype=jnp.floataa ) UpperCamelCase__ = 0 for batch in tqdm(_A , total=_A , desc='''Evaluating ... ''' ): UpperCamelCase__ = self.data_collator(_A ) UpperCamelCase__ = self.val_step_fn(_A , **_A ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 return running_loss / i def A_ ( self : Optional[Any] , _a : Optional[Any] , _a : List[str] ): UpperCamelCase__ = jax_utils.unreplicate(_A ) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' ) self.model_save_fn(_A , params=state.params ) with open(os.path.join(_A , '''opt_state.msgpack''' ) , '''wb''' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(_A , '''args.joblib''' ) ) joblib.dump(self.data_collator , os.path.join(_A , '''data_collator.joblib''' ) ) with open(os.path.join(_A , '''training_state.json''' ) , '''w''' ) as f: json.dump({'''step''': state.step.item()} , _A ) print('''DONE''' ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Tuple ): '''simple docstring''' print(F"""RESTORING CHECKPOINT FROM {save_dir}""", end=''' ... 
''' ) with open(os.path.join(UpperCamelCase__, '''flax_model.msgpack''' ), '''rb''' ) as f: UpperCamelCase__ = from_bytes(state.params, f.read() ) with open(os.path.join(UpperCamelCase__, '''opt_state.msgpack''' ), '''rb''' ) as f: UpperCamelCase__ = from_bytes(state.opt_state, f.read() ) UpperCamelCase__ = joblib.load(os.path.join(UpperCamelCase__, '''args.joblib''' ) ) UpperCamelCase__ = joblib.load(os.path.join(UpperCamelCase__, '''data_collator.joblib''' ) ) with open(os.path.join(UpperCamelCase__, '''training_state.json''' ), '''r''' ) as f: UpperCamelCase__ = json.load(UpperCamelCase__ ) UpperCamelCase__ = training_state['''step'''] print('''DONE''' ) return params, opt_state, step, args, data_collator def lowerCamelCase_ ( UpperCamelCase__ : List[str], UpperCamelCase__ : Dict, UpperCamelCase__ : Tuple, UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = num_train_steps - warmup_steps UpperCamelCase__ = optax.linear_schedule(init_value=UpperCamelCase__, end_value=UpperCamelCase__, transition_steps=UpperCamelCase__ ) UpperCamelCase__ = optax.linear_schedule(init_value=UpperCamelCase__, end_value=1e-7, transition_steps=UpperCamelCase__ ) UpperCamelCase__ = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps] ) return lr def lowerCamelCase_ ( UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[int] ): '''simple docstring''' def weight_decay_mask(UpperCamelCase__ : List[str] ): UpperCamelCase__ = traverse_util.flatten_dict(UpperCamelCase__ ) UpperCamelCase__ = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) UpperCamelCase__ = scheduler_fn(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) UpperCamelCase__ = optax.adamw(learning_rate=UpperCamelCase__, weight_decay=UpperCamelCase__, 
mask=UpperCamelCase__ ) return tx, lr
240
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
74
0
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig snake_case_ : Union[str, Any] = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class lowercase__ ( lowerCAmelCase__ ): lowercase__ = """ernie_m""" lowercase__ = {"""dropout""": """classifier_dropout""", """num_classes""": """num_labels"""} def __init__( self : List[str] ,lowerCamelCase__ : int = 250002 ,lowerCamelCase__ : int = 768 ,lowerCamelCase__ : int = 12 ,lowerCamelCase__ : int = 12 ,lowerCamelCase__ : int = 3072 ,lowerCamelCase__ : str = "gelu" ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : int = 514 ,lowerCamelCase__ : float = 0.0_2 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : float = 1E-05 ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Optional[Any]=False ,lowerCamelCase__ : int=0.0 ,**lowerCamelCase__ : List[str] ,): '''simple docstring''' super().__init__(pad_token_id=_A ,**_A ) _UpperCamelCase : Any = vocab_size _UpperCamelCase : List[Any] = hidden_size _UpperCamelCase : List[Any] = num_hidden_layers _UpperCamelCase : Tuple = num_attention_heads _UpperCamelCase : Optional[int] = intermediate_size _UpperCamelCase : List[Any] = hidden_act _UpperCamelCase : int = hidden_dropout_prob _UpperCamelCase : str = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = max_position_embeddings _UpperCamelCase : Optional[Any] = initializer_range _UpperCamelCase : Tuple = layer_norm_eps _UpperCamelCase : List[Any] = classifier_dropout _UpperCamelCase : str = is_decoder _UpperCamelCase : Tuple = act_dropout
195
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] 
): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
74
0
"""simple docstring""" import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger("""transformers.models.speecht5""") def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ) -> int: hf_model.apply_weight_norm() lowerCAmelCase__ : Tuple = checkpoint['''input_conv.weight_g'''] lowerCAmelCase__ : Any = checkpoint['''input_conv.weight_v'''] lowerCAmelCase__ : Optional[int] = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): lowerCAmelCase__ : Any = checkpoint[F"""upsamples.{i}.1.weight_g"""] lowerCAmelCase__ : List[Any] = checkpoint[F"""upsamples.{i}.1.weight_v"""] lowerCAmelCase__ : Union[str, Any] = checkpoint[F"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): lowerCAmelCase__ : Union[str, Any] = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""] lowerCAmelCase__ : str = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""] lowerCAmelCase__ : int = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""] lowerCAmelCase__ : Union[str, Any] = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""] lowerCAmelCase__ : Dict = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""] lowerCAmelCase__ : Dict = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""] lowerCAmelCase__ : List[Any] = checkpoint['''output_conv.1.weight_g'''] lowerCAmelCase__ : Tuple = checkpoint['''output_conv.1.weight_v'''] lowerCAmelCase__ : List[Any] = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , ) -> List[Any]: if config_path is not None: lowerCAmelCase__ : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(lowercase__ ) else: lowerCAmelCase__ : Union[str, Any] = SpeechTaHifiGanConfig() 
lowerCAmelCase__ : Optional[Any] = SpeechTaHifiGan(lowercase__ ) lowerCAmelCase__ : str = torch.load(lowercase__ ) load_weights(orig_checkpoint["model"]["generator"] , lowercase__ , lowercase__ ) lowerCAmelCase__ : Any = np.load(lowercase__ ) lowerCAmelCase__ : List[Any] = stats[0].reshape(-1 ) lowerCAmelCase__ : Optional[Any] = stats[1].reshape(-1 ) lowerCAmelCase__ : Optional[int] = torch.from_numpy(lowercase__ ).float() lowerCAmelCase__ : List[Any] = torch.from_numpy(lowercase__ ).float() model.save_pretrained(lowercase__ ) if repo_id: print("Pushing to the hub..." ) model.push_to_hub(lowercase__ ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) _UpperCamelCase = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
453
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )] # initialize interval's left pointer and right pointer __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0 for i in range(1 , len(snake_case ) ): # case when current index is inside the interval if i <= right_pointer: __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __SCREAMING_SNAKE_CASE : Dict = min_edge while go_next(snake_case , snake_case , snake_case ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1 return z_result def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]] def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(snake_case ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
74
0
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a__ ( self ) -> Union[str, Any]: super().tearDown() gc.collect() def a__ ( self ) -> Optional[int]: _A : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) _A : Optional[Any] = '''A painting of a squirrel eating a burger''' _A : int = jax.device_count() _A : Tuple = num_samples * [prompt] _A : Optional[Any] = sd_pipe.prepare_inputs(_A ) _A : Tuple = replicate(_A ) _A : Optional[int] = shard(_A ) _A : Dict = jax.random.PRNGKey(0 ) _A : Optional[int] = jax.random.split(_A , jax.device_count() ) _A : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _A : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _A : Union[str, Any] = images[0, 253:256, 253:256, -1] _A : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _A : Tuple = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def a__ ( self ) -> Dict: _A : List[str] = '''stabilityai/stable-diffusion-2''' _A : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder="""scheduler""" ) _A : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision="""bf16""" , dtype=jnp.bfloataa , ) _A : List[str] = scheduler_params _A : Tuple = '''A painting of a squirrel eating a burger''' _A : List[Any] = jax.device_count() _A : Tuple = 
num_samples * [prompt] _A : Any = sd_pipe.prepare_inputs(_A ) _A : Optional[int] = replicate(_A ) _A : List[str] = shard(_A ) _A : int = jax.random.PRNGKey(0 ) _A : Union[str, Any] = jax.random.split(_A , jax.device_count() ) _A : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _A : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _A : Dict = images[0, 253:256, 253:256, -1] _A : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _A : Union[str, Any] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
307
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json", "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json", "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json", "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class _a (lowerCAmelCase__): """simple docstring""" SCREAMING_SNAKE_CASE = 'mobilenet_v2' def __init__( self , A__=3 , A__=2_24 , A__=1.0 , A__=8 , A__=8 , A__=6 , A__=32 , A__=True , A__=True , A__="relu6" , A__=True , A__=0.8 , A__=0.02 , A__=0.001 , A__=2_55 , **A__ , ) -> int: super().__init__(**_A ) if depth_multiplier <= 0: raise ValueError("""depth_multiplier must be greater than zero.""" ) _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = depth_multiplier _SCREAMING_SNAKE_CASE = depth_divisible_by _SCREAMING_SNAKE_CASE = min_depth _SCREAMING_SNAKE_CASE = expand_ratio _SCREAMING_SNAKE_CASE = output_stride _SCREAMING_SNAKE_CASE = first_layer_is_expansion _SCREAMING_SNAKE_CASE = finegrained_output _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = tf_padding _SCREAMING_SNAKE_CASE = classifier_dropout_prob _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = semantic_loss_ignore_index class _a (lowerCAmelCase__): """simple docstring""" SCREAMING_SNAKE_CASE = version.parse('1.11') @property def UpperCamelCase ( self ) 
-> Union[str, Any]: return OrderedDict([("""pixel_values""", {0: """batch"""})] ) @property def UpperCamelCase ( self ) -> Optional[int]: if self.task == "image-classification": return OrderedDict([("""logits""", {0: """batch"""})] ) else: return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] ) @property def UpperCamelCase ( self ) -> List[Any]: return 1E-4
591
"""Convert X-CLIP checkpoints from the original repository to the 🤗 Transformers format."""
import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def get_xclip_config(model_name, num_frames):
    """Build the XCLIPConfig matching a given checkpoint name and clip length."""
    text_config = XCLIPTextConfig()

    # derive patch size from model name (two digits right after "patch")
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

        if model_name == "xclip-large-patch14-16-frames":
            vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config


def rename_key(name):
    """Map an original checkpoint key to its 🤗 Transformers equivalent."""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename keys and split fused in_proj q/k/v tensors to match the HF model layout."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            # projection layers were stored transposed in the original checkpoint
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict


def prepare_video(num_frames):
    """Download the spaghetti-eating test video with the requested number of frames."""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)


def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Download an original X-CLIP checkpoint, convert it to the HF format, verify the
    outputs on a test video, and optionally save and/or push the result.
    """
    # NOTE: the Google Drive URLs previously contained HTML-escaped "&amp;"
    # separators, which broke the download; they are plain "&" here.
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # position_ids buffers are created on the fly, so they are expected to be missing
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"],
        videos=video,
        return_tensors="pt",
        padding=True,
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs against values obtained with the original implementation
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
0
from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = None ) -> Optional[Any]: """simple docstring""" if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release: # old versions of hfh don't url-encode the file path lowerCAmelCase__ = quote(snake_case__ ) return hfh.hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' , revision=snake_case__ )
193
from pathlib import Path import fire def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = Path(snake_case ) __SCREAMING_SNAKE_CASE : Dict = Path(snake_case ) dest_dir.mkdir(exist_ok=snake_case ) for path in src_dir.iterdir(): __SCREAMING_SNAKE_CASE : Union[str, Any] = [x.rstrip() for x in list(path.open().readlines() )][:n] __SCREAMING_SNAKE_CASE : Tuple = dest_dir.joinpath(path.name ) print(snake_case ) dest_path.open('''w''' ).write('''\n'''.join(snake_case ) ) if __name__ == "__main__": fire.Fire(minify)
74
0
'''simple docstring''' import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class _lowerCAmelCase ( lowerCAmelCase__ ): """simple docstring""" snake_case_ = ComputeEnvironment.AMAZON_SAGEMAKER snake_case_ = True snake_case_ = "ml.p3.2xlarge" snake_case_ = "accelerate_sagemaker_execution_role" snake_case_ = "hf-sm" snake_case_ = "us-east-1" snake_case_ = 1 snake_case_ = "accelerate-sagemaker-1" snake_case_ = "1.6" snake_case_ = "4.4" snake_case_ = "train.py" snake_case_ = [ "--model_name_or_path", "bert", "--do_train", "False", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5", ] snake_case_ = [ "--model_name_or_path", "bert", "--do_train", "--do_test", "False", "--do_predict", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5", ] class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Tuple )-> int: snake_case = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["""model_name_or_path"""] , _A ) assert isinstance(converted_args["""do_train"""] , _A ) assert isinstance(converted_args["""epochs"""] , _A ) assert isinstance(converted_args["""learning_rate"""] , _A ) assert isinstance(converted_args["""max_steps"""] , _A ) with pytest.raises(_A ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
369
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class __UpperCamelCase(unittest.TestCase):
    """Unit tests for DisjunctiveConstraint (fulfilling any one of several token sequences)."""

    def test_input_types(self) -> None:
        # For consistency across different branches of the library, token_ids
        # must be a plain list of lists of ints - tensors are rejected.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self) -> None:
        # One sequence being a strict prefix of another is ambiguous and rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self) -> None:
        # Stepping 1 -> 2 -> 3 completes the first branch.
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        self.assertTrue(stepped is True and completed is True and reset is False)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self) -> None:
        # Branches of unequal length; also exercises reset() mid-progression.
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
74
0